Bug Summary

File: build/gcc/ira-color.cc
Warning: line 3647, column 2
Value stored to 'subloop_node' is never read
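
The deadcode.DeadStores checker (enabled in the analyzer invocation below) reports an assignment whose stored value is never read again before the variable is overwritten or goes out of scope. A minimal sketch of that pattern, illustrative only and not the actual code at ira-color.cc line 3647 (the struct, the helper and the control flow are hypothetical; only the variable name is reused from the warning text):

  struct node { node *next; };
  void process (node *);

  void
  example (node *loop_node)
  {
    node *subloop_node = loop_node;
    process (subloop_node);
    /* Dead store: this value is never read before the function returns,
       so the analyzer reports
       "Value stored to 'subloop_node' is never read".  */
    subloop_node = loop_node->next;
  }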

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-suse-linux -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ira-color.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -resource-dir /usr/lib64/clang/15.0.7 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/backward -internal-isystem /usr/lib64/clang/15.0.7/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../x86_64-suse-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2023-03-27-141847-20772-1/report-wlk0oI.plist -x c++ /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc
1/* IRA allocation based on graph coloring.
2 Copyright (C) 2006-2023 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "backend.h"
25#include "target.h"
26#include "rtl.h"
27#include "tree.h"
28#include "predict.h"
29#include "df.h"
30#include "memmodel.h"
31#include "tm_p.h"
32#include "insn-config.h"
33#include "regs.h"
34#include "ira.h"
35#include "ira-int.h"
36#include "reload.h"
37#include "cfgloop.h"
38
39/* To prevent soft conflict detection becoming quadratic in the
40 loop depth. Only for very pathological cases, so it hardly
41 seems worth a --param. */
42const int max_soft_conflict_loop_depth = 64;
43
44typedef struct allocno_hard_regs *allocno_hard_regs_t;
45
46/* The structure contains information about hard registers can be
47 assigned to allocnos. Usually it is allocno profitable hard
48 registers but in some cases this set can be a bit different. Major
49 reason of the difference is a requirement to use hard register sets
50 that form a tree or a forest (set of trees), i.e. hard register set
51 of a node should contain hard register sets of its subnodes. */
52struct allocno_hard_regs
53{
54 /* Hard registers can be assigned to an allocno. */
55 HARD_REG_SET set;
56 /* Overall (spilling) cost of all allocnos with given register
57 set. */
58 int64_t cost;
59};
60
61typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;
62
63/* A node representing allocno hard registers. Such nodes form a
64 forest (set of trees). Each subnode of given node in the forest
65 refers for hard register set (usually allocno profitable hard
66 register set) which is a subset of one referred from given
67 node. */
68struct allocno_hard_regs_node
69{
70 /* Set up number of the node in preorder traversing of the forest. */
71 int preorder_num;
72 /* Used for different calculation like finding conflict size of an
73 allocno. */
74 int check;
75 /* Used for calculation of conflict size of an allocno. The
76 conflict size of the allocno is maximal number of given allocno
77 hard registers needed for allocation of the conflicting allocnos.
78 Given allocno is trivially colored if this number plus the number
79 of hard registers needed for given allocno is not greater than
80 the number of given allocno hard register set. */
81 int conflict_size;
82 /* The number of hard registers given by member hard_regs. */
83 int hard_regs_num;
84 /* The following member is used to form the final forest. */
85 bool used_p;
86 /* Pointer to the corresponding profitable hard registers. */
87 allocno_hard_regs_t hard_regs;
88 /* Parent, first subnode, previous and next node with the same
89 parent in the forest. */
90 allocno_hard_regs_node_t parent, first, prev, next;
91};
92
93/* Info about changing hard reg costs of an allocno. */
94struct update_cost_record
95{
96 /* Hard regno for which we changed the cost. */
97 int hard_regno;
98 /* Divisor used when we changed the cost of HARD_REGNO. */
99 int divisor;
100 /* Next record for given allocno. */
101 struct update_cost_record *next;
102};
103
104/* To decrease footprint of ira_allocno structure we store all data
105 needed only for coloring in the following structure. */
106struct allocno_color_data
107{
108 /* TRUE value means that the allocno was not removed yet from the
109 conflicting graph during coloring. */
110 unsigned int in_graph_p : 1;
111 /* TRUE if it is put on the stack to make other allocnos
112 colorable. */
113 unsigned int may_be_spilled_p : 1;
114 /* TRUE if the allocno is trivially colorable. */
115 unsigned int colorable_p : 1;
116 /* Number of hard registers of the allocno class really
117 available for the allocno allocation. It is number of the
118 profitable hard regs. */
119 int available_regs_num;
120 /* Sum of frequencies of hard register preferences of all
121 conflicting allocnos which are not the coloring stack yet. */
122 int conflict_allocno_hard_prefs;
123 /* Allocnos in a bucket (used in coloring) chained by the following
124 two members. */
125 ira_allocno_t next_bucket_allocno;
126 ira_allocno_t prev_bucket_allocno;
127 /* Used for temporary purposes. */
128 int temp;
129 /* Used to exclude repeated processing. */
130 int last_process;
131 /* Profitable hard regs available for this pseudo allocation. It
132 means that the set excludes unavailable hard regs and hard regs
133 conflicting with given pseudo. They should be of the allocno
134 class. */
135 HARD_REG_SET profitable_hard_regs;
136 /* The allocno hard registers node. */
137 allocno_hard_regs_node_t hard_regs_node;
138 /* Array of structures allocno_hard_regs_subnode representing
139 given allocno hard registers node (the 1st element in the array)
140 and all its subnodes in the tree (forest) of allocno hard
141 register nodes (see comments above). */
142 int hard_regs_subnodes_start;
143 /* The length of the previous array. */
144 int hard_regs_subnodes_num;
145 /* Records about updating allocno hard reg costs from copies. If
146 the allocno did not get expected hard register, these records are
147 used to restore original hard reg costs of allocnos connected to
148 this allocno by copies. */
149 struct update_cost_record *update_cost_records;
150 /* Threads. We collect allocnos connected by copies into threads
151 and try to assign hard regs to allocnos by threads. */
152 /* Allocno representing all thread. */
153 ira_allocno_t first_thread_allocno;
154 /* Allocnos in thread forms a cycle list through the following
155 member. */
156 ira_allocno_t next_thread_allocno;
157 /* All thread frequency. Defined only for first thread allocno. */
158 int thread_freq;
159 /* Sum of frequencies of hard register preferences of the allocno. */
160 int hard_reg_prefs;
161};
162
163/* See above. */
164typedef struct allocno_color_data *allocno_color_data_t;
165
166/* Container for storing allocno data concerning coloring. */
167static allocno_color_data_t allocno_color_data;
168
169/* Macro to access the data concerning coloring. */
170#define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))
171
172/* Used for finding allocno colorability to exclude repeated allocno
173 processing and for updating preferencing to exclude repeated
174 allocno processing during assignment. */
175static int curr_allocno_process;
176
177/* This file contains code for regional graph coloring, spill/restore
178 code placement optimization, and code helping the reload pass to do
179 a better job. */
180
181/* Bitmap of allocnos which should be colored. */
182static bitmap coloring_allocno_bitmap;
183
184/* Bitmap of allocnos which should be taken into account during
185 coloring. In general case it contains allocnos from
186 coloring_allocno_bitmap plus other already colored conflicting
187 allocnos. */
188static bitmap consideration_allocno_bitmap;
189
190/* All allocnos sorted according their priorities. */
191static ira_allocno_t *sorted_allocnos;
192
193/* Vec representing the stack of allocnos used during coloring. */
194static vec<ira_allocno_t> allocno_stack_vec;
195
196/* Helper for qsort comparison callbacks - return a positive integer if
197 X > Y, or a negative value otherwise. Use a conditional expression
198 instead of a difference computation to insulate from possible overflow
199 issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
200#define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
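
The overflow concern in the comment above can be made concrete with hypothetical values not taken from this file: for x = 1 and y = INT_MIN, a difference-based comparator computes 1 - INT_MIN, which is not representable in int (signed overflow, undefined behavior), so its sign cannot be trusted, while the conditional form used by SORTGT does no arithmetic at all:

  #include <climits>

  /* Broken sketch: signed overflow for x = 1, y = INT_MIN.  */
  static int cmp_by_difference (int x, int y) { return x - y; }

  /* Safe sketch: the conditional form SORTGT uses, no subtraction involved.  */
  static int cmp_by_comparison (int x, int y) { return (x > y) ? 1 : -1; }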
201
202
203
204/* Definition of vector of allocno hard registers. */
205
206/* Vector of unique allocno hard registers. */
207static vec<allocno_hard_regs_t> allocno_hard_regs_vec;
208
209struct allocno_hard_regs_hasher : nofree_ptr_hash <allocno_hard_regs>
210{
211 static inline hashval_t hash (const allocno_hard_regs *);
212 static inline bool equal (const allocno_hard_regs *,
213 const allocno_hard_regs *);
214};
215
216/* Returns hash value for allocno hard registers V. */
217inline hashval_t
218allocno_hard_regs_hasher::hash (const allocno_hard_regs *hv)
219{
220 return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
221}
222
223/* Compares allocno hard registers V1 and V2. */
224inline bool
225allocno_hard_regs_hasher::equal (const allocno_hard_regs *hv1,
226 const allocno_hard_regs *hv2)
227{
228 return hv1->set == hv2->set;
229}
230
231/* Hash table of unique allocno hard registers. */
232static hash_table<allocno_hard_regs_hasher> *allocno_hard_regs_htab;
233
234/* Return allocno hard registers in the hash table equal to HV. */
235static allocno_hard_regs_t
236find_hard_regs (allocno_hard_regs_t hv)
237{
238 return allocno_hard_regs_htab->find (hv);
239}
240
241/* Insert allocno hard registers HV in the hash table (if it is not
242 there yet) and return the value which in the table. */
243static allocno_hard_regs_t
244insert_hard_regs (allocno_hard_regs_t hv)
245{
246 allocno_hard_regs **slot = allocno_hard_regs_htab->find_slot (hv, INSERT);
247
248 if (*slot == NULL)
249 *slot = hv;
250 return *slot;
251}
252
253/* Initialize data concerning allocno hard registers. */
254static void
255init_allocno_hard_regs (void)
256{
257 allocno_hard_regs_vec.create (200);
258 allocno_hard_regs_htab
259 = new hash_table<allocno_hard_regs_hasher> (200);
260}
261
262/* Add (or update info about) allocno hard registers with SET and
263 COST. */
264static allocno_hard_regs_t
265add_allocno_hard_regs (HARD_REG_SET set, int64_t cost)
266{
267 struct allocno_hard_regs temp;
268 allocno_hard_regs_t hv;
269
270 gcc_assert (! hard_reg_set_empty_p (set));
271 temp.set = set;
272 if ((hv = find_hard_regs (&temp)) != NULL)
273 hv->cost += cost;
274 else
275 {
276 hv = ((struct allocno_hard_regs *)
277 ira_allocate (sizeof (struct allocno_hard_regs)));
278 hv->set = set;
279 hv->cost = cost;
280 allocno_hard_regs_vec.safe_push (hv);
281 insert_hard_regs (hv);
282 }
283 return hv;
284}
285
286/* Finalize data concerning allocno hard registers. */
287static void
288finish_allocno_hard_regs (void)
289{
290 int i;
291 allocno_hard_regs_t hv;
292
293 for (i = 0;
294 allocno_hard_regs_vec.iterate (i, &hv);
295 i++)
296 ira_free (hv);
297 delete allocno_hard_regs_htab;
298 allocno_hard_regs_htab = NULL;
299 allocno_hard_regs_vec.release ();
300}
301
302/* Sort hard regs according to their frequency of usage. */
303static int
304allocno_hard_regs_compare (const void *v1p, const void *v2p)
305{
306 allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
307 allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;
308
309 if (hv2->cost > hv1->cost)
310 return 1;
311 else if (hv2->cost < hv1->cost)
312 return -1;
313 return SORTGT (allocno_hard_regs_hasher::hash(hv2), allocno_hard_regs_hasher::hash(hv1));
314}
315
316
317
318/* Used for finding a common ancestor of two allocno hard registers
319 nodes in the forest. We use the current value of
320 'node_check_tick' to mark all nodes from one node to the top and
321 then walking up from another node until we find a marked node.
322
323 It is also used to figure out allocno colorability as a mark that
324 we already reset value of member 'conflict_size' for the forest
325 node corresponding to the processed allocno. */
326static int node_check_tick;
327
328/* Roots of the forest containing hard register sets can be assigned
329 to allocnos. */
330static allocno_hard_regs_node_t hard_regs_roots;
331
332/* Definition of vector of allocno hard register nodes. */
333
334/* Vector used to create the forest. */
335static vec<allocno_hard_regs_node_t> hard_regs_node_vec;
336
337/* Create and return allocno hard registers node containing allocno
338 hard registers HV. */
339static allocno_hard_regs_node_t
340create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
341{
342 allocno_hard_regs_node_t new_node;
343
344 new_node = ((struct allocno_hard_regs_node *)
345 ira_allocate (sizeof (struct allocno_hard_regs_node)));
346 new_node->check = 0;
347 new_node->hard_regs = hv;
348 new_node->hard_regs_num = hard_reg_set_size (hv->set);
349 new_node->first = NULL;
350 new_node->used_p = false;
351 return new_node;
352}
353
354/* Add allocno hard registers node NEW_NODE to the forest on its level
355 given by ROOTS. */
356static void
357add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
358 allocno_hard_regs_node_t new_node)
359{
360 new_node->next = *roots;
361 if (new_node->next != NULL)
362 new_node->next->prev = new_node;
363 new_node->prev = NULL;
364 *roots = new_node;
365}
366
367/* Add allocno hard registers HV (or its best approximation if it is
368 not possible) to the forest on its level given by ROOTS. */
369static void
370add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
371 allocno_hard_regs_t hv)
372{
373 unsigned int i, start;
374 allocno_hard_regs_node_t node, prev, new_node;
375 HARD_REG_SET temp_set;
376 allocno_hard_regs_t hv2;
377
378 start = hard_regs_node_vec.length ();
379 for (node = *roots; node != NULL; node = node->next)
380 {
381 if (hv->set == node->hard_regs->set)
382 return;
383 if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
384 {
385 add_allocno_hard_regs_to_forest (&node->first, hv);
386 return;
387 }
388 if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
389 hard_regs_node_vec.safe_push (node);
390 else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
391 {
392 temp_set = hv->set & node->hard_regs->set;
393 hv2 = add_allocno_hard_regs (temp_set, hv->cost);
394 add_allocno_hard_regs_to_forest (&node->first, hv2);
395 }
396 }
397 if (hard_regs_node_vec.length ()
398 > start + 1)
399 {
400 /* Create a new node which contains nodes in hard_regs_node_vec. */
401 CLEAR_HARD_REG_SET (temp_set);
402 for (i = start;
403 i < hard_regs_node_vec.length ();
404 i++)
405 {
406 node = hard_regs_node_vec[i];
407 temp_set |= node->hard_regs->set;
408 }
409 hv = add_allocno_hard_regs (temp_set, hv->cost);
410 new_node = create_new_allocno_hard_regs_node (hv);
411 prev = NULL;
412 for (i = start;
413 i < hard_regs_node_vec.length ();
414 i++)
415 {
416 node = hard_regs_node_vec[i];
417 if (node->prev == NULL)
418 *roots = node->next;
419 else
420 node->prev->next = node->next;
421 if (node->next != NULL)
422 node->next->prev = node->prev;
423 if (prev == NULL)
424 new_node->first = node;
425 else
426 prev->next = node;
427 node->prev = prev;
428 node->next = NULL;
429 prev = node;
430 }
431 add_new_allocno_hard_regs_node_to_forest (roots, new_node);
432 }
433 hard_regs_node_vec.truncate (start);
434}
435
436/* Add allocno hard registers nodes starting with the forest level
437 given by FIRST which contains biggest set inside SET. */
438static void
439collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
440 HARD_REG_SET set)
441{
442 allocno_hard_regs_node_t node;
443
444 ira_assert (first != NULL);
445 for (node = first; node != NULL; node = node->next)
446 if (hard_reg_set_subset_p (node->hard_regs->set, set))
447 hard_regs_node_vec.safe_push (node);
448 else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
449 collect_allocno_hard_regs_cover (node->first, set);
450}
451
452/* Set up field parent as PARENT in all allocno hard registers nodes
453 in forest given by FIRST. */
454static void
455setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
456 allocno_hard_regs_node_t parent)
457{
458 allocno_hard_regs_node_t node;
459
460 for (node = first; node != NULL; node = node->next)
461 {
462 node->parent = parent;
463 setup_allocno_hard_regs_nodes_parent (node->first, node);
464 }
465}
466
467/* Return allocno hard registers node which is a first common ancestor
468 node of FIRST and SECOND in the forest. */
469static allocno_hard_regs_node_t
470first_common_ancestor_node (allocno_hard_regs_node_t first,
471 allocno_hard_regs_node_t second)
472{
473 allocno_hard_regs_node_t node;
474
475 node_check_tick++;
476 for (node = first; node != NULL; node = node->parent)
477 node->check = node_check_tick;
478 for (node = second; node != NULL; node = node->parent)
479 if (node->check == node_check_tick)
480 return node;
481 return first_common_ancestor_node (second, first);
482}
483
484/* Print hard reg set SET to F. */
485static void
486print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
487{
488 int i, start, end;
489
490 for (start = end = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
491 {
492 bool reg_included = TEST_HARD_REG_BIT (set, i);
493
494 if (reg_included)
495 {
496 if (start == -1)
497 start = i;
498 end = i;
499 }
500 if (start >= 0 && (!reg_included || i == FIRST_PSEUDO_REGISTER - 1))
501 {
502 if (start == end)
503 fprintf (f, " %d", start);
504 else if (start == end + 1)
505 fprintf (f, " %d %d", start, end);
506 else
507 fprintf (f, " %d-%d", start, end);
508 start = -1;
509 }
510 }
511 if (new_line_p)
512 fprintf (f, "\n");
513}
514
515/* Dump a hard reg set SET to stderr. */
516DEBUG_FUNCTION void
517debug_hard_reg_set (HARD_REG_SET set)
518{
519 print_hard_reg_set (stderr, set, true);
520}
521
522/* Print allocno hard register subforest given by ROOTS and its LEVEL
523 to F. */
524static void
525print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
526 int level)
527{
528 int i;
529 allocno_hard_regs_node_t node;
530
531 for (node = roots; node != NULL; node = node->next)
532 {
533 fprintf (f, " ");
534 for (i = 0; i < level * 2; i++)
535 fprintf (f, " ");
536 fprintf (f, "%d:(", node->preorder_num);
537 print_hard_reg_set (f, node->hard_regs->set, false);
538 fprintf (f, ")@%" PRId64 "\n", node->hard_regs->cost);
539 print_hard_regs_subforest (f, node->first, level + 1);
540 }
541}
542
543/* Print the allocno hard register forest to F. */
544static void
545print_hard_regs_forest (FILE *f)
546{
547 fprintf (f, " Hard reg set forest:\n");
548 print_hard_regs_subforest (f, hard_regs_roots, 1);
549}
550
551/* Print the allocno hard register forest to stderr. */
552void
553ira_debug_hard_regs_forest (void)
554{
555 print_hard_regs_forest (stderr);
556}
557
558/* Remove unused allocno hard registers nodes from forest given by its
559 *ROOTS. */
560static void
561remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
562{
563 allocno_hard_regs_node_t node, prev, next, last;
564
565 for (prev = NULL, node = *roots; node != NULL; node = next)
566 {
567 next = node->next;
568 if (node->used_p)
569 {
570 remove_unused_allocno_hard_regs_nodes (&node->first);
571 prev = node;
572 }
573 else
574 {
575 for (last = node->first;
576 last != NULL && last->next != NULL;
577 last = last->next)
578 ;
579 if (last != NULL)
580 {
581 if (prev == NULL)
582 *roots = node->first;
583 else
584 prev->next = node->first;
585 if (next != NULL)
586 next->prev = last;
587 last->next = next;
588 next = node->first;
589 }
590 else
591 {
592 if (prev == NULL)
593 *roots = next;
594 else
595 prev->next = next;
596 if (next != NULL)
597 next->prev = prev;
598 }
599 ira_free (node);
600 }
601 }
602}
603
604/* Set up fields preorder_num starting with START_NUM in all allocno
605 hard registers nodes in forest given by FIRST. Return biggest set
606 PREORDER_NUM increased by 1. */
607static int
608enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
609 allocno_hard_regs_node_t parent,
610 int start_num)
611{
612 allocno_hard_regs_node_t node;
613
614 for (node = first; node != NULL; node = node->next)
615 {
616 node->preorder_num = start_num++;
617 node->parent = parent;
618 start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
619 start_num);
620 }
621 return start_num;
622}
623
624/* Number of allocno hard registers nodes in the forest. */
625static int allocno_hard_regs_nodes_num;
626
627/* Table preorder number of allocno hard registers node in the forest
628 -> the allocno hard registers node. */
629static allocno_hard_regs_node_t *allocno_hard_regs_nodes;
630
631/* See below. */
632typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;
633
634/* The structure is used to describe all subnodes (not only immediate
635 ones) in the mentioned above tree for given allocno hard register
636 node. The usage of such data accelerates calculation of
637 colorability of given allocno. */
638struct allocno_hard_regs_subnode
639{
640 /* The conflict size of conflicting allocnos whose hard register
641 sets are equal sets (plus supersets if given node is given
642 allocno hard registers node) of one in the given node. */
643 int left_conflict_size;
644 /* The summary conflict size of conflicting allocnos whose hard
645 register sets are strict subsets of one in the given node.
646 Overall conflict size is
647 left_conflict_subnodes_size
648 + MIN (max_node_impact - left_conflict_subnodes_size,
649 left_conflict_size)
650 */
651 short left_conflict_subnodes_size;
652 short max_node_impact;
653};
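
A worked instance of the overall conflict size formula in the comment above, with hypothetical numbers: if left_conflict_subnodes_size = 3, max_node_impact = 8 and left_conflict_size = 6, the overall conflict size is 3 + MIN (8 - 3, 6) = 3 + 5 = 8.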
654
655/* Container for hard regs subnodes of all allocnos. */
656static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;
657
658/* Table (preorder number of allocno hard registers node in the
659 forest, preorder number of allocno hard registers subnode) -> index
660 of the subnode relative to the node. -1 if it is not a
661 subnode. */
662static int *allocno_hard_regs_subnode_index;
663
664/* Setup arrays ALLOCNO_HARD_REGS_NODES and
665 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
666static void
667setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
668{
669 allocno_hard_regs_node_t node, parent;
670 int index;
671
672 for (node = first; node != NULL; node = node->next)
673 {
674 allocno_hard_regs_nodes[node->preorder_num] = node;
675 for (parent = node; parent != NULL; parent = parent->parent)
676 {
677 index = parent->preorder_num * allocno_hard_regs_nodes_num;
678 allocno_hard_regs_subnode_index[index + node->preorder_num]
679 = node->preorder_num - parent->preorder_num;
680 }
681 setup_allocno_hard_regs_subnode_index (node->first);
682 }
683}
684
685/* Count all allocno hard registers nodes in tree ROOT. */
686static int
687get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
688{
689 int len = 1;
690
691 for (root = root->first; root != NULL; root = root->next)
692 len += get_allocno_hard_regs_subnodes_num (root);
693 return len;
694}
695
696/* Build the forest of allocno hard registers nodes and assign each
697 allocno a node from the forest. */
698static void
699form_allocno_hard_regs_nodes_forest (void)
700{
701 unsigned int i, j, size, len;
702 int start;
703 ira_allocno_t a;
704 allocno_hard_regs_t hv;
705 bitmap_iterator bi;
706 HARD_REG_SET temp;
707 allocno_hard_regs_node_t node, allocno_hard_regs_node;
708 allocno_color_data_t allocno_data;
709
710 node_check_tick = 0;
711 init_allocno_hard_regs ();
712 hard_regs_roots = NULL;
713 hard_regs_node_vec.create (100);
714 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
715 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
716 {
717 CLEAR_HARD_REG_SET (temp);
718 SET_HARD_REG_BIT (temp, i);
719 hv = add_allocno_hard_regs (temp, 0);
720 node = create_new_allocno_hard_regs_node (hv);
721 add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
722 }
723 start = allocno_hard_regs_vec.length ();
724 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
725 {
726 a = ira_allocnos[i];
727 allocno_data = ALLOCNO_COLOR_DATA (a);
728
729 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
730 continue;
731 hv = (add_allocno_hard_regs
732 (allocno_data->profitable_hard_regs,
733 ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
734 }
735 temp = ~ira_no_alloc_regs;
736 add_allocno_hard_regs (temp, 0);
737 qsort (allocno_hard_regs_vec.address () + start,
738 allocno_hard_regs_vec.length () - start,
739 sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
740 for (i = start;
741 allocno_hard_regs_vec.iterate (i, &hv);
742 i++)
743 {
744 add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
745 ira_assert (hard_regs_node_vec.length () == 0);
746 }
747 /* We need to set up parent fields for right work of
748 first_common_ancestor_node. */
749 setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
750 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
751 {
752 a = ira_allocnos[i];
753 allocno_data = ALLOCNO_COLOR_DATA (a);
754 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
755 continue;
756 hard_regs_node_vec.truncate (0);
757 collect_allocno_hard_regs_cover (hard_regs_roots,
758 allocno_data->profitable_hard_regs);
759 allocno_hard_regs_node = NULL;
760 for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
761 allocno_hard_regs_node
762 = (j == 0
763 ? node
764 : first_common_ancestor_node (node, allocno_hard_regs_node));
765 /* That is a temporary storage. */
766 allocno_hard_regs_node->used_p = true;
767 allocno_data->hard_regs_node = allocno_hard_regs_node;
768 }
769 ira_assert (hard_regs_roots->next == NULL);
770 hard_regs_roots->used_p = true;
771 remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
772 allocno_hard_regs_nodes_num
773 = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
774 allocno_hard_regs_nodes
775 = ((allocno_hard_regs_node_t *)
776 ira_allocate (allocno_hard_regs_nodes_num
777 * sizeof (allocno_hard_regs_node_t)));
778 size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
779 allocno_hard_regs_subnode_index
780 = (int *) ira_allocate (size * sizeof (int));
781 for (i = 0; i < size; i++)
782 allocno_hard_regs_subnode_index[i] = -1;
783 setup_allocno_hard_regs_subnode_index (hard_regs_roots);
784 start = 0;
785 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
786 {
787 a = ira_allocnos[i];
788 allocno_data = ALLOCNO_COLOR_DATA (a);
789 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
790 continue;
791 len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
792 allocno_data->hard_regs_subnodes_start = start;
793 allocno_data->hard_regs_subnodes_num = len;
794 start += len;
795 }
796 allocno_hard_regs_subnodes
797 = ((allocno_hard_regs_subnode_t)
798 ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
799 hard_regs_node_vec.release ();
800}
801
802/* Free tree of allocno hard registers nodes given by its ROOT. */
803static void
804finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
805{
806 allocno_hard_regs_node_t child, next;
807
808 for (child = root->first; child != NULL; child = next)
809 {
810 next = child->next;
811 finish_allocno_hard_regs_nodes_tree (child);
812 }
813 ira_free (root);
814}
815
816/* Finish work with the forest of allocno hard registers nodes. */
817static void
818finish_allocno_hard_regs_nodes_forest (void)
819{
820 allocno_hard_regs_node_t node, next;
821
822 ira_free (allocno_hard_regs_subnodes);
823 for (node = hard_regs_roots; node != NULL; node = next)
824 {
825 next = node->next;
826 finish_allocno_hard_regs_nodes_tree (node);
827 }
828 ira_free (allocno_hard_regs_nodes);
829 ira_free (allocno_hard_regs_subnode_index);
830 finish_allocno_hard_regs ();
831}
832
833/* Set up left conflict sizes and left conflict subnodes sizes of hard
834 registers subnodes of allocno A. Return TRUE if allocno A is
835 trivially colorable. */
836static bool
837setup_left_conflict_sizes_p (ira_allocno_t a)
838{
839 int i, k, nobj, start;
840 int conflict_size, left_conflict_subnodes_size, node_preorder_num;
841 allocno_color_data_t data;
842 HARD_REG_SET profitable_hard_regs;
843 allocno_hard_regs_subnode_t subnodes;
844 allocno_hard_regs_node_t node;
845 HARD_REG_SET node_set;
846
847 nobj = ALLOCNO_NUM_OBJECTS (a);
848 data = ALLOCNO_COLOR_DATA (a);
849 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
850 profitable_hard_regs = data->profitable_hard_regs;
851 node = data->hard_regs_node;
852 node_preorder_num = node->preorder_num;
853 node_set = node->hard_regs->set;
854 node_check_tick++;
855 for (k = 0; k < nobj; k++)
856 {
857 ira_object_t obj = ALLOCNO_OBJECT (a, k);
858 ira_object_t conflict_obj;
859 ira_object_conflict_iterator oci;
860
861 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
862 {
863 int size;
864 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
865 allocno_hard_regs_node_t conflict_node, temp_node;
866 HARD_REG_SET conflict_node_set;
867 allocno_color_data_t conflict_data;
868
869 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
870 if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
871 || ! hard_reg_set_intersect_p (profitable_hard_regs,
872 conflict_data
873 ->profitable_hard_regs))
874 continue;
875 conflict_node = conflict_data->hard_regs_node;
876 conflict_node_set = conflict_node->hard_regs->set;
877 if (hard_reg_set_subset_p (node_set, conflict_node_set))
878 temp_node = node;
879 else
880 {
881 ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
882 temp_node = conflict_node;
883 }
884 if (temp_node->check != node_check_tick)
885 {
886 temp_node->check = node_check_tick;
887 temp_node->conflict_size = 0;
888 }
889 size = (ira_reg_class_max_nregs
890 [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
891 if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
892 /* We will deal with the subwords individually. */
893 size = 1;
894 temp_node->conflict_size += size;
895 }
896 }
897 for (i = 0; i < data->hard_regs_subnodes_num; i++)
898 {
899 allocno_hard_regs_node_t temp_node;
900
901 temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
902 ira_assert (temp_node->preorder_num == i + node_preorder_num);
903 subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
904 ? 0 : temp_node->conflict_size);
905 if (hard_reg_set_subset_p (temp_node->hard_regs->set,
906 profitable_hard_regs))
907 subnodes[i].max_node_impact = temp_node->hard_regs_num;
908 else
909 {
910 HARD_REG_SET temp_set;
911 int j, n, hard_regno;
912 enum reg_class aclass;
913
914 temp_set = temp_node->hard_regs->set & profitable_hard_regs;
915 aclass = ALLOCNO_CLASS (a);
916 for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
917 {
918 hard_regno = ira_class_hard_regs[aclass][j];
919 if (TEST_HARD_REG_BIT (temp_set, hard_regno))
920 n++;
921 }
922 subnodes[i].max_node_impact = n;
923 }
924 subnodes[i].left_conflict_subnodes_size = 0;
925 }
926 start = node_preorder_num * allocno_hard_regs_nodes_num;
927 for (i = data->hard_regs_subnodes_num - 1; i > 0; i--)
928 {
929 int size, parent_i;
930 allocno_hard_regs_node_t parent;
931
932 size = (subnodes[i].left_conflict_subnodes_size
933 + MIN (subnodes[i].max_node_impact
934 - subnodes[i].left_conflict_subnodes_size,
935 subnodes[i].left_conflict_size));
936 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
937 gcc_checking_assert(parent);
938 parent_i
939 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
940 gcc_checking_assert(parent_i >= 0);
941 subnodes[parent_i].left_conflict_subnodes_size += size;
942 }
943 left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
944 conflict_size
945 = (left_conflict_subnodes_size
946 + MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
947 subnodes[0].left_conflict_size));
948 conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
949 data->colorable_p = conflict_size <= data->available_regs_num;
950 return data->colorable_p;
951}
952
953/* Update left conflict sizes of hard registers subnodes of allocno A
954 after removing allocno REMOVED_A with SIZE from the conflict graph.
955 Return TRUE if A is trivially colorable. */
956static bool
957update_left_conflict_sizes_p (ira_allocno_t a,
958 ira_allocno_t removed_a, int size)
959{
960 int i, conflict_size, before_conflict_size, diff, start;
961 int node_preorder_num, parent_i;
962 allocno_hard_regs_node_t node, removed_node, parent;
963 allocno_hard_regs_subnode_t subnodes;
964 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
965
966 ira_assert (! data->colorable_p);
967 node = data->hard_regs_node;
968 node_preorder_num = node->preorder_num;
969 removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
970 ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
971 node->hard_regs->set)
972 || hard_reg_set_subset_p (node->hard_regs->set,
973 removed_node->hard_regs->set));
974 start = node_preorder_num * allocno_hard_regs_nodes_num;
975 i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
976 if (i < 0)
977 i = 0;
978 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
979 before_conflict_size
980 = (subnodes[i].left_conflict_subnodes_size
981 + MIN (subnodes[i].max_node_impact
982 - subnodes[i].left_conflict_subnodes_size,
983 subnodes[i].left_conflict_size));
984 subnodes[i].left_conflict_size -= size;
985 for (;;)
986 {
987 conflict_size
988 = (subnodes[i].left_conflict_subnodes_size
989 + MIN (subnodes[i].max_node_impact
990 - subnodes[i].left_conflict_subnodes_size,
991 subnodes[i].left_conflict_size));
992 if ((diff = before_conflict_size - conflict_size) == 0)
993 break;
994 ira_assert (conflict_size < before_conflict_size);
995 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
996 if (parent == NULL)
997 break;
998 parent_i
999 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
1000 if (parent_i < 0)
1001 break;
1002 i = parent_i;
1003 before_conflict_size
1004 = (subnodes[i].left_conflict_subnodes_size
1005 + MIN (subnodes[i].max_node_impact
1006 - subnodes[i].left_conflict_subnodes_size,
1007 subnodes[i].left_conflict_size));
1008 subnodes[i].left_conflict_subnodes_size -= diff;
1009 }
1010 if (i != 0
1011 || (conflict_size
1012 + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
1013 > data->available_regs_num))
1014 return false;
1015 data->colorable_p = true;
1016 return true;
1017}
1018
1019/* Return true if allocno A has empty profitable hard regs. */
1020static bool
1021empty_profitable_hard_regs (ira_allocno_t a)
1022{
1023 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
1024
1025 return hard_reg_set_empty_p (data->profitable_hard_regs);
1026}
1027
1028/* Set up profitable hard registers for each allocno being
1029 colored. */
1030static void
1031setup_profitable_hard_regs (void)
1032{
1033 unsigned int i;
1034 int j, k, nobj, hard_regno, nregs, class_size;
1035 ira_allocno_t a;
1036 bitmap_iterator bi;
1037 enum reg_class aclass;
1038 machine_mode mode;
1039 allocno_color_data_t data;
1040
1041 /* Initial set up from allocno classes and explicitly conflicting
1042 hard regs. */
1043 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1044 {
1045 a = ira_allocnos[i];
1046 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
1047 continue;
1048 data = ALLOCNO_COLOR_DATA (a);
1049 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
1050 && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a)
1051 /* Do not empty profitable regs for static chain pointer
1052 pseudo when non-local goto is used. */
1053 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
1054 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
1055 else
1056 {
1057 mode = ALLOCNO_MODE (a);
1058 data->profitable_hard_regs
1059 = ira_useful_class_mode_regs[aclass][mode];
1060 nobj = ALLOCNO_NUM_OBJECTS (a);
1061 for (k = 0; k < nobj; k++)
1062 {
1063 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1064
1065 data->profitable_hard_regs
1066 &= ~OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
1067 }
1068 }
1069 }
1070 /* Exclude hard regs already assigned for conflicting objects. */
1071 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
1072 {
1073 a = ira_allocnos[i];
1074 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1075 || ! ALLOCNO_ASSIGNED_P (a)
1076 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
1077 continue;
1078 mode = ALLOCNO_MODE (a);
1079 nregs = hard_regno_nregs (hard_regno, mode);
1080 nobj = ALLOCNO_NUM_OBJECTS (a);
1081 for (k = 0; k < nobj; k++)
1082 {
1083 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1084 ira_object_t conflict_obj;
1085 ira_object_conflict_iterator oci;
1086
1087 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1088 {
1089 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1090
1091 /* We can process the conflict allocno repeatedly with
1092 the same result. */
1093 if (nregs == nobj && nregs > 1)
1094 {
1095 int num = OBJECT_SUBWORD (conflict_obj);
1096
1097 if (REG_WORDS_BIG_ENDIAN)
1098 CLEAR_HARD_REG_BIT
1099 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1100 hard_regno + nobj - num - 1);
1101 else
1102 CLEAR_HARD_REG_BIT
1103 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1104 hard_regno + num);
1105 }
1106 else
1107 ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs
1108 &= ~ira_reg_mode_hard_regset[hard_regno][mode];
1109 }
1110 }
1111 }
1112 /* Exclude too costly hard regs. */
1113 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1114 {
1115 int min_cost = INT_MAX;
1116 int *costs;
1117
1118 a = ira_allocnos[i];
1119 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1120 || empty_profitable_hard_regs (a))
1121 continue;
1122 data = ALLOCNO_COLOR_DATA (a);
1123 if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
1124 || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
1125 {
1126 class_size = ira_class_hard_regs_num[aclass];
1127 for (j = 0; j < class_size; j++)
1128 {
1129 hard_regno = ira_class_hard_regs[aclass][j];
1130 if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
1131 hard_regno))
1132 continue;
1133 if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j]
1134 /* Do not remove HARD_REGNO for static chain pointer
1135 pseudo when non-local goto is used. */
1136 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
1137 CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
1138 hard_regno);
1139 else if (min_cost > costs[j])
1140 min_cost = costs[j];
1141 }
1142 }
1143 else if (ALLOCNO_UPDATED_MEMORY_COST (a)
1144 < ALLOCNO_UPDATED_CLASS_COST (a)
1145 /* Do not empty profitable regs for static chain
1146 pointer pseudo when non-local goto is used. */
1147 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
1148 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
1150 if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
1151 ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
1151 }
1152}
1153
1154
1155
1156/* This page contains functions used to choose hard registers for
1157 allocnos. */
1158
1159/* Pool for update cost records. */
1160static object_allocator<update_cost_record> update_cost_record_pool
1161 ("update cost records");
1162
1163/* Return new update cost record with given params. */
1164static struct update_cost_record *
1165get_update_cost_record (int hard_regno, int divisor,
1166 struct update_cost_record *next)
1167{
1168 struct update_cost_record *record;
1169
1170 record = update_cost_record_pool.allocate ();
1171 record->hard_regno = hard_regno;
1172 record->divisor = divisor;
1173 record->next = next;
1174 return record;
1175}
1176
1177/* Free memory for all records in LIST. */
1178static void
1179free_update_cost_record_list (struct update_cost_record *list)
1180{
1181 struct update_cost_record *next;
1182
1183 while (list != NULL)
1184 {
1185 next = list->next;
1186 update_cost_record_pool.remove (list);
1187 list = next;
1188 }
1189}
1190
1191/* Free memory allocated for all update cost records. */
1192static void
1193finish_update_cost_records (void)
1194{
1195 update_cost_record_pool.release ();
1196}
1197
1198/* Array whose element value is TRUE if the corresponding hard
1199 register was already allocated for an allocno. */
1200static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
1201
1202/* Describes one element in a queue of allocnos whose costs need to be
1203 updated. Each allocno in the queue is known to have an allocno
1204 class. */
1205struct update_cost_queue_elem
1206{
1207 /* This element is in the queue iff CHECK == update_cost_check. */
1208 int check;
1209
1210 /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
1211 connecting this allocno to the one being allocated. */
1212 int divisor;
1213
1214 /* Allocno from which we started chaining costs of connected
1215 allocnos. */
1216 ira_allocno_t start;
1217
1218 /* Allocno from which we are chaining costs of connected allocnos.
1219 It is used to avoid going back in the graph of allocnos connected by
1220 copies. */
1221 ira_allocno_t from;
1222
1223 /* The next allocno in the queue, or null if this is the last element. */
1224 ira_allocno_t next;
1225};
1226
1227/* The first element in a queue of allocnos whose copy costs need to be
1228 updated. Null if the queue is empty. */
1229static ira_allocno_t update_cost_queue;
1230
1231/* The last element in the queue described by update_cost_queue.
1232 Not valid if update_cost_queue is null. */
1233static struct update_cost_queue_elem *update_cost_queue_tail;
1234
1235/* A pool of elements in the queue described by update_cost_queue.
1236 Elements are indexed by ALLOCNO_NUM. */
1237static struct update_cost_queue_elem *update_cost_queue_elems;
1238
1239/* The current value of update_costs_from_copies call count. */
1240static int update_cost_check;
1241
1242/* Allocate and initialize data necessary for function
1243 update_costs_from_copies. */
1244static void
1245initiate_cost_update (void)
1246{
1247 size_t size;
1248
1249 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
1250 update_cost_queue_elems
1251 = (struct update_cost_queue_elem *) ira_allocate (size);
1252 memset (update_cost_queue_elems, 0, size);
1253 update_cost_check = 0;
1254}
1255
1256/* Deallocate data used by function update_costs_from_copies. */
1257static void
1258finish_cost_update (void)
1259{
1260 ira_free (update_cost_queue_elems);
1261 finish_update_cost_records ();
1262}
1263
1264/* When we traverse allocnos to update hard register costs, the cost
1265 divisor will be multiplied by the following macro value for each
1266 hop from given allocno to directly connected allocnos. */
1267#define COST_HOP_DIVISOR 4
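
As an illustration of the comment above: each hop multiplies the running divisor by 4, so whatever divisor d applies to a directly connected allocno, an allocno two hops away is updated with divisor 4*d and one three hops away with 16*d; the propagated cost shrinks geometrically with copy-graph distance. A small hedged sketch (the starting divisor and hop count are hypothetical, not taken from this excerpt):

  /* Illustrative only: geometric growth of the divisor per hop.  */
  static int
  divisor_after_hops (int initial_divisor, int hops)
  {
    int divisor = initial_divisor;
    for (int i = 0; i < hops; i++)
      divisor *= COST_HOP_DIVISOR;
    return divisor;
  }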
1268
1269/* Start a new cost-updating pass. */
1270static void
1271start_update_cost (void)
1272{
1273 update_cost_check++;
1274 update_cost_queue = NULL;
1275}
1276
1277/* Add (ALLOCNO, START, FROM, DIVISOR) to the end of update_cost_queue, unless
1278 ALLOCNO is already in the queue, or has NO_REGS class. */
1279static inline void
1280queue_update_cost (ira_allocno_t allocno, ira_allocno_t start,
1281 ira_allocno_t from, int divisor)
1282{
1283 struct update_cost_queue_elem *elem;
1284
1285 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
1286 if (elem->check != update_cost_check
1287 && ALLOCNO_CLASS (allocno) != NO_REGS)
1288 {
1289 elem->check = update_cost_check;
1290 elem->start = start;
1291 elem->from = from;
1292 elem->divisor = divisor;
1293 elem->next = NULL;
1294 if (update_cost_queue == NULL)
1295 update_cost_queue = allocno;
1296 else
1297 update_cost_queue_tail->next = allocno;
1298 update_cost_queue_tail = elem;
1299 }
1300}
1301
1302/* Try to remove the first element from update_cost_queue. Return
1303 false if the queue was empty, otherwise make (*ALLOCNO, *START,
1304 *FROM, *DIVISOR) describe the removed element. */
1305static inline bool
1306get_next_update_cost (ira_allocno_t *allocno, ira_allocno_t *start,
1307 ira_allocno_t *from, int *divisor)
1308{
1309 struct update_cost_queue_elem *elem;
1310
1311 if (update_cost_queue == NULL)
1312 return false;
1313
1314 *allocno = update_cost_queue;
1315 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
1316 *start = elem->start;
1317 *from = elem->from;
1318 *divisor = elem->divisor;
1319 update_cost_queue = elem->next;
1320 return true;
1321}
1322
1323/* Increase costs of HARD_REGNO by UPDATE_COST and conflict cost by
1324 UPDATE_CONFLICT_COST for ALLOCNO. Return true if we really
1325 modified the cost. */
1326static bool
1327update_allocno_cost (ira_allocno_t allocno, int hard_regno,
1328 int update_cost, int update_conflict_cost)
1329{
1330 int i;
1331 enum reg_class aclass = ALLOCNO_CLASS (allocno)((allocno)->aclass);
1332
1333 i = ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[aclass][hard_regno];
1334 if (i < 0)
1335 return false;
1336 ira_allocate_and_set_or_copy_costs
1337 (&ALLOCNO_UPDATED_HARD_REG_COSTS (allocno)((allocno)->updated_hard_reg_costs), aclass,
1338 ALLOCNO_UPDATED_CLASS_COST (allocno)((allocno)->updated_class_cost),
1339 ALLOCNO_HARD_REG_COSTS (allocno)((allocno)->hard_reg_costs));
1340 ira_allocate_and_set_or_copy_costs
1341 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno)((allocno)->updated_conflict_hard_reg_costs),
1342 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (allocno)((allocno)->conflict_hard_reg_costs));
1343 ALLOCNO_UPDATED_HARD_REG_COSTS (allocno)((allocno)->updated_hard_reg_costs)[i] += update_cost;
1344 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno)((allocno)->updated_conflict_hard_reg_costs)[i] += update_conflict_cost;
1345 return true;
1346}
1347
1348/* Return TRUE if the object OBJ conflicts with the allocno A. */
1349static bool
1350object_conflicts_with_allocno_p (ira_object_t obj, ira_allocno_t a)
1351{
1352 if (!OBJECT_CONFLICT_VEC_P (obj)((obj)->conflict_vec_p))
1353 for (int word = 0; word < ALLOCNO_NUM_OBJECTS (a)((a)->num_objects); word++)
1354 {
1355 ira_object_t another_obj = ALLOCNO_OBJECT (a, word)((a)->objects[word]);
1356	if (OBJECT_CONFLICT_ID (another_obj) >= OBJECT_MIN (obj)
1357	    && OBJECT_CONFLICT_ID (another_obj) <= OBJECT_MAX (obj)
1358	    && TEST_MINMAX_SET_BIT (OBJECT_CONFLICT_BITVEC (obj),
1359				    OBJECT_CONFLICT_ID (another_obj),
1360				    OBJECT_MIN (obj), OBJECT_MAX (obj)))
1361 return true;
1362 }
1363 else
1364 {
1365 /* If this linear walk ever becomes a bottleneck we could add a
1366 conflict_vec_sorted_p flag and if not set, sort the conflicts after
1367 their ID so we can use a binary search. That would also require
1368 tracking the actual number of conflicts in the vector to not rely
1369 on the NULL termination. */
1370 ira_object_conflict_iterator oci;
1371 ira_object_t conflict_obj;
1372      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1373 if (OBJECT_ALLOCNO (conflict_obj)((conflict_obj)->allocno) == a)
1374 return true;
1375 }
1376 return false;
1377}
1378
1379/* Return TRUE if allocnos A1 and A2 conflict.  Here we are
1380 interested only in conflicts of allocnos with intersecting allocno
1381 classes. */
1382static bool
1383allocnos_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
1384{
1385 /* Compute the upper bound for the linear iteration when the object
1386 conflicts are represented as a sparse vector. In particular this
1387 will make sure we prefer O(1) bitvector testing. */
1388 int num_conflicts_in_vec1 = 0, num_conflicts_in_vec2 = 0;
1389 for (int word = 0; word < ALLOCNO_NUM_OBJECTS (a1)((a1)->num_objects); ++word)
1390 if (OBJECT_CONFLICT_VEC_P (ALLOCNO_OBJECT (a1, word))((((a1)->objects[word]))->conflict_vec_p))
1391 num_conflicts_in_vec1 += OBJECT_NUM_CONFLICTS (ALLOCNO_OBJECT (a1, word))((((a1)->objects[word]))->num_accumulated_conflicts);
1392 for (int word = 0; word < ALLOCNO_NUM_OBJECTS (a2)((a2)->num_objects); ++word)
1393 if (OBJECT_CONFLICT_VEC_P (ALLOCNO_OBJECT (a2, word))((((a2)->objects[word]))->conflict_vec_p))
1394 num_conflicts_in_vec2 += OBJECT_NUM_CONFLICTS (ALLOCNO_OBJECT (a2, word))((((a2)->objects[word]))->num_accumulated_conflicts);
1395 if (num_conflicts_in_vec2 < num_conflicts_in_vec1)
1396 std::swap (a1, a2);
1397
1398 for (int word = 0; word < ALLOCNO_NUM_OBJECTS (a1)((a1)->num_objects); word++)
1399 {
1400 ira_object_t obj = ALLOCNO_OBJECT (a1, word)((a1)->objects[word]);
1401 /* Take preferences of conflicting allocnos into account. */
1402 if (object_conflicts_with_allocno_p (obj, a2))
1403 return true;
1404 }
1405 return false;
1406}
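/* The swap above is a small optimization: a bit-vector representation is
   tested in constant time by object_conflicts_with_allocno_p, whereas a
   conflict vector has to be scanned linearly.  By making A1 the allocno
   whose objects carry the smaller total conflict vectors, any linear scans
   that do happen are over the shorter vectors.  */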
1407
1408/* Update (decrease if DECR_P) HARD_REGNO cost of allocnos connected
1409 by copies to ALLOCNO to increase chances to remove some copies as
1410 the result of subsequent assignment. Update conflict costs.
1411 Record cost updates if RECORD_P is true. */
1412static void
1413update_costs_from_allocno (ira_allocno_t allocno, int hard_regno,
1414 int divisor, bool decr_p, bool record_p)
1415{
1416 int cost, update_cost, update_conflict_cost;
1417 machine_mode mode;
1418 enum reg_class rclass, aclass;
1419 ira_allocno_t another_allocno, start = allocno, from = NULLnullptr;
1420 ira_copy_t cp, next_cp;
1421
1422 rclass = REGNO_REG_CLASS (hard_regno)(regclass_map[(hard_regno)]);
1423 do
1424 {
1425 mode = ALLOCNO_MODE (allocno)((allocno)->mode);
1426 ira_init_register_move_cost_if_necessary (mode);
1427 for (cp = ALLOCNO_COPIES (allocno)((allocno)->allocno_copies); cp != NULLnullptr; cp = next_cp)
1428 {
1429 if (cp->first == allocno)
1430 {
1431 next_cp = cp->next_first_allocno_copy;
1432 another_allocno = cp->second;
1433 }
1434 else if (cp->second == allocno)
1435 {
1436 next_cp = cp->next_second_allocno_copy;
1437 another_allocno = cp->first;
1438 }
1439 else
1440	    gcc_unreachable ();
1441
1442 if (another_allocno == from
1443 || (ALLOCNO_COLOR_DATA (another_allocno)((allocno_color_data_t) ((another_allocno)->add_data)) != NULLnullptr
1444 && (ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->first_thread_allocno
1445 != ALLOCNO_COLOR_DATA (another_allocno)((allocno_color_data_t) ((another_allocno)->add_data))->first_thread_allocno)))
1446 continue;
1447
1448 aclass = ALLOCNO_CLASS (another_allocno)((another_allocno)->aclass);
1449 if (! TEST_HARD_REG_BIT (reg_class_contents(this_target_hard_regs->x_reg_class_contents)[aclass],
1450 hard_regno)
1451 || ALLOCNO_ASSIGNED_P (another_allocno)((another_allocno)->assigned_p))
1452 continue;
1453
1454 /* If we have different modes use the smallest one. It is
1455 a sub-register move. It is hard to predict what LRA
1456 will reload (the pseudo or its sub-register) but LRA
1457 will try to minimize the data movement. Also for some
1458 register classes bigger modes might be invalid,
1459 e.g. DImode for AREG on x86. For such cases the
1460 register move cost will be maximal. */
1461 mode = narrower_subreg_mode (ALLOCNO_MODE (cp->first)((cp->first)->mode),
1462 ALLOCNO_MODE (cp->second)((cp->second)->mode));
1463
1464 ira_init_register_move_cost_if_necessary (mode);
1465
1466 cost = (cp->second == allocno
1467 ? ira_register_move_cost(this_target_ira_int->x_ira_register_move_cost)[mode][rclass][aclass]
1468 : ira_register_move_cost(this_target_ira_int->x_ira_register_move_cost)[mode][aclass][rclass]);
1469 if (decr_p)
1470 cost = -cost;
1471
1472 update_cost = cp->freq * cost / divisor;
1473 update_conflict_cost = update_cost;
1474
1475 if (internal_flag_ira_verbose > 5 && ira_dump_file != NULLnullptr)
1476 fprintf (ira_dump_file,
1477 " a%dr%d (hr%d): update cost by %d, conflict cost by %d\n",
1478 ALLOCNO_NUM (another_allocno)((another_allocno)->num), ALLOCNO_REGNO (another_allocno)((another_allocno)->regno),
1479 hard_regno, update_cost, update_conflict_cost);
1480 if (update_cost == 0)
1481 continue;
1482
1483 if (! update_allocno_cost (another_allocno, hard_regno,
1484 update_cost, update_conflict_cost))
1485 continue;
1486 queue_update_cost (another_allocno, start, allocno,
1487 divisor * COST_HOP_DIVISOR4);
1488 if (record_p && ALLOCNO_COLOR_DATA (another_allocno)((allocno_color_data_t) ((another_allocno)->add_data)) != NULLnullptr)
1489 ALLOCNO_COLOR_DATA (another_allocno)((allocno_color_data_t) ((another_allocno)->add_data))->update_cost_records
1490 = get_update_cost_record (hard_regno, divisor,
1491 ALLOCNO_COLOR_DATA (another_allocno)((allocno_color_data_t) ((another_allocno)->add_data))
1492 ->update_cost_records);
1493 }
1494 }
1495 while (get_next_update_cost (&allocno, &start, &from, &divisor));
1496}
1497
1498/* Decrease preferred ALLOCNO hard register costs and costs of
1499 allocnos connected to ALLOCNO through copy. */
1500static void
1501update_costs_from_prefs (ira_allocno_t allocno)
1502{
1503 ira_pref_t pref;
1504
1505 start_update_cost ();
1506 for (pref = ALLOCNO_PREFS (allocno)((allocno)->allocno_prefs); pref != NULLnullptr; pref = pref->next_pref)
1507 {
1508 if (internal_flag_ira_verbose > 5 && ira_dump_file != NULLnullptr)
1509 fprintf (ira_dump_file, " Start updating from pref of hr%d for a%dr%d:\n",
1510 pref->hard_regno, ALLOCNO_NUM (allocno)((allocno)->num), ALLOCNO_REGNO (allocno)((allocno)->regno));
1511 update_costs_from_allocno (allocno, pref->hard_regno,
1512 COST_HOP_DIVISOR4, true, true);
1513 }
1514}
1515
1516/* Update (decrease if DECR_P) the cost of allocnos connected to
1517 ALLOCNO through copies to increase chances to remove some copies as
1518 the result of subsequent assignment. ALLOCNO was just assigned to
1519 a hard register. Record cost updates if RECORD_P is true. */
1520static void
1521update_costs_from_copies (ira_allocno_t allocno, bool decr_p, bool record_p)
1522{
1523 int hard_regno;
1524
1525 hard_regno = ALLOCNO_HARD_REGNO (allocno)((allocno)->hard_regno);
1526  ira_assert (hard_regno >= 0 && ALLOCNO_CLASS (allocno) != NO_REGS);
1527 start_update_cost ();
1528 if (internal_flag_ira_verbose > 5 && ira_dump_file != NULLnullptr)
1529 fprintf (ira_dump_file, " Start updating from a%dr%d by copies:\n",
1530 ALLOCNO_NUM (allocno)((allocno)->num), ALLOCNO_REGNO (allocno)((allocno)->regno));
1531 update_costs_from_allocno (allocno, hard_regno, 1, decr_p, record_p);
1532}
1533
1534/* Update conflict_allocno_hard_prefs of allocnos conflicting with
1535 ALLOCNO. */
1536static void
1537update_conflict_allocno_hard_prefs (ira_allocno_t allocno)
1538{
1539 int l, nr = ALLOCNO_NUM_OBJECTS (allocno)((allocno)->num_objects);
1540
1541 for (l = 0; l < nr; l++)
1542 {
1543 ira_object_t conflict_obj, obj = ALLOCNO_OBJECT (allocno, l)((allocno)->objects[l]);
1544 ira_object_conflict_iterator oci;
1545
1546      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1547 {
1548 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj)((conflict_obj)->allocno);
1549 allocno_color_data_t conflict_data = ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data));
1550 ira_pref_t pref;
1551
1552 if (!(hard_reg_set_intersect_p
1553 (ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->profitable_hard_regs,
1554 conflict_data->profitable_hard_regs)))
1555 continue;
1556 for (pref = ALLOCNO_PREFS (allocno)((allocno)->allocno_prefs);
1557 pref != NULLnullptr;
1558 pref = pref->next_pref)
1559 conflict_data->conflict_allocno_hard_prefs += pref->freq;
1560 }
1561 }
1562}
1563
1564/* Restore costs of allocnos connected to ALLOCNO by copies to what
1565   they were before updating costs of these allocnos from the given
1566   allocno.  This is a wise thing to do because, if the given allocno
1567   did not get the expected hard reg, using the smaller cost of that
1568   hard reg for allocnos connected to it by copies becomes misleading.
1569   Free all update cost records for ALLOCNO as we don't need them anymore. */
1570static void
1571restore_costs_from_copies (ira_allocno_t allocno)
1572{
1573 struct update_cost_record *records, *curr;
1574
1575 if (ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data)) == NULLnullptr)
1576 return;
1577 records = ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->update_cost_records;
1578 start_update_cost ();
1579 if (internal_flag_ira_verbose > 5 && ira_dump_file != NULLnullptr)
1580 fprintf (ira_dump_file, " Start restoring from a%dr%d:\n",
1581 ALLOCNO_NUM (allocno)((allocno)->num), ALLOCNO_REGNO (allocno)((allocno)->regno));
1582 for (curr = records; curr != NULLnullptr; curr = curr->next)
1583 update_costs_from_allocno (allocno, curr->hard_regno,
1584 curr->divisor, true, false);
1585 free_update_cost_record_list (records);
1586 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->update_cost_records = NULLnullptr;
1587}
1588
1589/* This function updates COSTS (decrease if DECR_P) for hard_registers
1590 of ACLASS by conflict costs of the unassigned allocnos
1591 connected by copies with allocnos in update_cost_queue. This
1592 update increases chances to remove some copies. */
1593static void
1594update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
1595 bool decr_p)
1596{
1597 int i, cost, class_size, freq, mult, div, divisor;
1598 int index, hard_regno;
1599 int *conflict_costs;
1600 bool cont_p;
1601 enum reg_class another_aclass;
1602 ira_allocno_t allocno, another_allocno, start, from;
1603 ira_copy_t cp, next_cp;
1604
1605 while (get_next_update_cost (&allocno, &start, &from, &divisor))
1606 for (cp = ALLOCNO_COPIES (allocno)((allocno)->allocno_copies); cp != NULLnullptr; cp = next_cp)
1607 {
1608 if (cp->first == allocno)
1609 {
1610 next_cp = cp->next_first_allocno_copy;
1611 another_allocno = cp->second;
1612 }
1613 else if (cp->second == allocno)
1614 {
1615 next_cp = cp->next_second_allocno_copy;
1616 another_allocno = cp->first;
1617 }
1618 else
1619	  gcc_unreachable ();
1620
1621 another_aclass = ALLOCNO_CLASS (another_allocno)((another_allocno)->aclass);
1622 if (another_allocno == from
1623 || ALLOCNO_ASSIGNED_P (another_allocno)((another_allocno)->assigned_p)
1624 || ALLOCNO_COLOR_DATA (another_allocno)((allocno_color_data_t) ((another_allocno)->add_data))->may_be_spilled_p
1625 || ! ira_reg_classes_intersect_p(this_target_ira->x_ira_reg_classes_intersect_p)[aclass][another_aclass])
1626 continue;
1627 if (allocnos_conflict_p (another_allocno, start))
1628 continue;
1629
1630 class_size = ira_class_hard_regs_num(this_target_ira->x_ira_class_hard_regs_num)[another_aclass];
1631 ira_allocate_and_copy_costs
1632 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)((another_allocno)->updated_conflict_hard_reg_costs),
1633 another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno)((another_allocno)->conflict_hard_reg_costs));
1634 conflict_costs
1635 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)((another_allocno)->updated_conflict_hard_reg_costs);
1636 if (conflict_costs == NULLnullptr)
1637 cont_p = true;
1638 else
1639 {
1640 mult = cp->freq;
1641 freq = ALLOCNO_FREQ (another_allocno)((another_allocno)->freq);
1642 if (freq == 0)
1643 freq = 1;
1644 div = freq * divisor;
1645 cont_p = false;
1646 for (i = class_size - 1; i >= 0; i--)
1647 {
1648 hard_regno = ira_class_hard_regs(this_target_ira->x_ira_class_hard_regs)[another_aclass][i];
1649		  ira_assert (hard_regno >= 0);
1650 index = ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[aclass][hard_regno];
1651 if (index < 0)
1652 continue;
1653 cost = (int) (((int64_t) conflict_costs [i] * mult) / div);
1654 if (cost == 0)
1655 continue;
1656 cont_p = true;
1657 if (decr_p)
1658 cost = -cost;
1659 costs[index] += cost;
1660 }
1661 }
1662 /* Probably 5 hops will be enough. */
1663 if (cont_p
1664 && divisor <= (COST_HOP_DIVISOR4
1665 * COST_HOP_DIVISOR4
1666 * COST_HOP_DIVISOR4
1667 * COST_HOP_DIVISOR4))
1668 queue_update_cost (another_allocno, start, from, divisor * COST_HOP_DIVISOR4);
1669 }
1670}
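/* With the initial divisor of COST_HOP_DIVISOR used when the queue is seeded
   (see assign_hard_reg below), the guard above re-queues elements only while
   divisor <= COST_HOP_DIVISOR^4 = 256, so the divisors actually processed
   are 4, 16, 64, 256 and 1024 -- the "5 hops" the comment refers to.  */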
1671
1672/* Set up conflicting regs (through CONFLICT_REGS) for each object of
1673   allocno A, and the start allocno profitable regs (through
1674 START_PROFITABLE_REGS). Remember that the start profitable regs
1675 exclude hard regs which cannot hold value of mode of allocno A.
1676 This covers mostly cases when multi-register value should be
1677 aligned. */
1678static inline void
1679get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1680 HARD_REG_SET *conflict_regs,
1681 HARD_REG_SET *start_profitable_regs)
1682{
1683 int i, nwords;
1684 ira_object_t obj;
1685
1686 nwords = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
1687 for (i = 0; i < nwords; i++)
1688 {
1689 obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]);
1690 conflict_regs[i] = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)((obj)->total_conflict_hard_regs);
1691 }
1692 if (retry_p)
1693 *start_profitable_regs
1694 = (reg_class_contents(this_target_hard_regs->x_reg_class_contents)[ALLOCNO_CLASS (a)((a)->aclass)]
1695 &~ (ira_prohibited_class_mode_regs(this_target_ira->x_ira_prohibited_class_mode_regs)
1696 [ALLOCNO_CLASS (a)((a)->aclass)][ALLOCNO_MODE (a)((a)->mode)]));
1697 else
1698 *start_profitable_regs = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->profitable_hard_regs;
1699}
1700
1701/* Return true if HARD_REGNO is ok for assigning to allocno A with
1702 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
1703static inline bool
1704check_hard_reg_p (ira_allocno_t a, int hard_regno,
1705 HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
1706{
1707 int j, nwords, nregs;
1708 enum reg_class aclass;
1709 machine_mode mode;
1710
1711 aclass = ALLOCNO_CLASS (a)((a)->aclass);
1712 mode = ALLOCNO_MODE (a)((a)->mode);
1713 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs(this_target_ira->x_ira_prohibited_class_mode_regs)[aclass][mode],
1714 hard_regno))
1715 return false;
1716 /* Checking only profitable hard regs. */
1717 if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
1718 return false;
1719 nregs = hard_regno_nregs (hard_regno, mode);
1720 nwords = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
1721 for (j = 0; j < nregs; j++)
1722 {
1723 int k;
1724 int set_to_test_start = 0, set_to_test_end = nwords;
1725
1726 if (nregs == nwords)
1727 {
1728 if (REG_WORDS_BIG_ENDIAN0)
1729 set_to_test_start = nwords - j - 1;
1730 else
1731 set_to_test_start = j;
1732 set_to_test_end = set_to_test_start + 1;
1733 }
1734 for (k = set_to_test_start; k < set_to_test_end; k++)
1735 if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
1736 break;
1737 if (k != set_to_test_end)
1738 break;
1739 }
1740 return j == nregs;
1741}
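/* An illustrative case with hypothetical numbers: for a two-word value
   (nwords == 2) that also needs two hard registers (nregs == 2), each
   register of the group is checked only against the conflict set of the word
   it will actually occupy (honoring REG_WORDS_BIG_ENDIAN).  When the counts
   differ -- say a single object that needs four hard registers -- every
   register of the group is checked against every word's conflict set.  */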
1742
1743/* Return number of registers needed to be saved and restored at
1744 function prologue/epilogue if we allocate HARD_REGNO to hold value
1745 of MODE. */
1746static int
1747calculate_saved_nregs (int hard_regno, machine_mode mode)
1748{
1749 int i;
1750 int nregs = 0;
1751
1752  ira_assert (hard_regno >= 0);
1753 for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
1754 if (!allocated_hardreg_p[hard_regno + i]
1755 && !crtl(&x_rtl)->abi->clobbers_full_reg_p (hard_regno + i)
1756 && !LOCAL_REGNO (hard_regno + i)0)
1757 nregs++;
1758 return nregs;
1759}
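/* Example with hypothetical numbers: if HARD_REGNO spans two hard registers
   for MODE and exactly one of them is not yet allocated, not clobbered by
   the current function's ABI and not a local register, the result is 1.
   assign_hard_reg below then adds roughly
   (sum of the two memory move costs) * saved_nregs / nregs - 1 to the cost
   of choosing HARD_REGNO, accounting for the save/restore that would be
   needed in the prologue/epilogue.  */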
1760
1761/* Allocnos A1 and A2 are known to conflict. Check whether, in some loop L
1762 that is either the current loop or a nested subloop, the conflict is of
1763 the following form:
1764
1765 - One allocno (X) is a cap allocno for some non-cap allocno X2.
1766
1767 - X2 belongs to some loop L2.
1768
1769 - The other allocno (Y) is a non-cap allocno.
1770
1771 - Y is an ancestor of some allocno Y2 in L2. (Note that such a Y2
1772 must exist, given that X and Y conflict.)
1773
1774 - Y2 is not referenced in L2 (that is, ALLOCNO_NREFS (Y2) == 0).
1775
1776 - Y can use a different allocation from Y2.
1777
1778 In this case, Y's register is live across L2 but is not used within it,
1779 whereas X's register is used only within L2. The conflict is therefore
1780 only "soft", in that it can easily be avoided by spilling Y2 inside L2
1781 without affecting any insn references.
1782
1783 If the conflict does have this form, return the Y2 that would need to be
1784 spilled in order to allow X and Y (and thus A1 and A2) to use the same
1785 register. Return null otherwise. Returning null is conservatively correct;
1786   any nonnull return value is an optimization. */
1787ira_allocno_t
1788ira_soft_conflict (ira_allocno_t a1, ira_allocno_t a2)
1789{
1790 /* Search for the loop L and its associated allocnos X and Y. */
1791 int search_depth = 0;
1792 while (ALLOCNO_CAP_MEMBER (a1)((a1)->cap_member) && ALLOCNO_CAP_MEMBER (a2)((a2)->cap_member))
1793 {
1794 a1 = ALLOCNO_CAP_MEMBER (a1)((a1)->cap_member);
1795 a2 = ALLOCNO_CAP_MEMBER (a2)((a2)->cap_member);
1796 if (search_depth++ > max_soft_conflict_loop_depth)
1797 return nullptr;
1798 }
1799 /* This must be true if A1 and A2 conflict. */
1800  ira_assert (ALLOCNO_LOOP_TREE_NODE (a1) == ALLOCNO_LOOP_TREE_NODE (a2));
1801
1802 /* Make A1 the cap allocno (X in the comment above) and A2 the
1803 non-cap allocno (Y in the comment above). */
1804 if (ALLOCNO_CAP_MEMBER (a2)((a2)->cap_member))
1805 std::swap (a1, a2);
1806 if (!ALLOCNO_CAP_MEMBER (a1)((a1)->cap_member))
1807 return nullptr;
1808
1809 /* Search for the real allocno that A1 caps (X2 in the comment above). */
1810 do
1811 {
1812 a1 = ALLOCNO_CAP_MEMBER (a1)((a1)->cap_member);
1813 if (search_depth++ > max_soft_conflict_loop_depth)
1814 return nullptr;
1815 }
1816 while (ALLOCNO_CAP_MEMBER (a1)((a1)->cap_member));
1817
1818 /* Find the associated allocno for A2 (Y2 in the comment above). */
1819 auto node = ALLOCNO_LOOP_TREE_NODE (a1)((a1)->loop_tree_node);
1820 auto local_a2 = node->regno_allocno_map[ALLOCNO_REGNO (a2)((a2)->regno)];
1821
1822 /* Find the parent of LOCAL_A2/Y2. LOCAL_A2 must be a descendant of A2
1823 for the conflict query to make sense, so this parent lookup must succeed.
1824
1825 If the parent allocno has no references, it is usually cheaper to
1826 spill at that loop level instead. Keep searching until we find
1827 a parent allocno that does have references (but don't look past
1828 the starting allocno). */
1829 ira_allocno_t local_parent_a2;
1830 for (;;)
1831 {
1832 local_parent_a2 = ira_parent_allocno (local_a2);
1833 if (local_parent_a2 == a2 || ALLOCNO_NREFS (local_parent_a2)((local_parent_a2)->nrefs) != 0)
1834 break;
1835 local_a2 = local_parent_a2;
1836 }
1837 if (CHECKING_P1)
1838 {
1839 /* Sanity check to make sure that the conflict we've been given
1840 makes sense. */
1841 auto test_a2 = local_parent_a2;
1842 while (test_a2 != a2)
1843 {
1844 test_a2 = ira_parent_allocno (test_a2);
1845	  ira_assert (test_a2);
1846 }
1847 }
1848 if (local_a2
1849 && ALLOCNO_NREFS (local_a2)((local_a2)->nrefs) == 0
1850 && ira_subloop_allocnos_can_differ_p (local_parent_a2))
1851 return local_a2;
1852 return nullptr;
1853}
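/* A concrete shape of the situation described above, with hypothetical
   pseudos x and y:

     loop L:    y is defined and used here               (allocno Y)
       loop L2: x is referenced only here                (allocno X2; its
                cap in L is X), while y is live across L2 but has no
                references in it (allocno Y2, ALLOCNO_NREFS (Y2) == 0)

   X and Y conflict in L only because y's value is carried over L2.  Spilling
   Y2 inside L2 frees the register for x inside L2 without changing any insn
   that references y, which is why the function returns Y2.  */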
1854
1855/* The caller has decided to allocate HREGNO to A and has proved that
1856 this is safe. However, the allocation might require the kind of
1857 spilling described in the comment above ira_soft_conflict.
1858 The caller has recorded that:
1859
1860 - The allocnos in ALLOCNOS_TO_SPILL are the ones that would need
1861 to be spilled to satisfy soft conflicts for at least one allocation
1862 (not necessarily HREGNO).
1863
1864 - The soft conflicts apply only to A allocations that overlap
1865 SOFT_CONFLICT_REGS.
1866
1867 If allocating HREGNO is subject to any soft conflicts, record the
1868 subloop allocnos that need to be spilled. */
1869static void
1870spill_soft_conflicts (ira_allocno_t a, bitmap allocnos_to_spill,
1871 HARD_REG_SET soft_conflict_regs, int hregno)
1872{
1873 auto nregs = hard_regno_nregs (hregno, ALLOCNO_MODE (a)((a)->mode));
1874 bitmap_iterator bi;
1875 unsigned int i;
1876  EXECUTE_IF_SET_IN_BITMAP (allocnos_to_spill, 0, i, bi)
1877 {
1878 /* SPILL_A needs to be spilled for at least one allocation
1879 (not necessarily this one). */
1880 auto spill_a = ira_allocnos[i];
1881
1882 /* Find the corresponding allocno for this loop. */
1883 auto conflict_a = spill_a;
1884 do
1885 {
1886 conflict_a = ira_parent_or_cap_allocno (conflict_a);
1887	  ira_assert (conflict_a);
1888 }
1889 while (ALLOCNO_LOOP_TREE_NODE (conflict_a)((conflict_a)->loop_tree_node)->level
1890 > ALLOCNO_LOOP_TREE_NODE (a)((a)->loop_tree_node)->level);
1891
1892      ira_assert (ALLOCNO_LOOP_TREE_NODE (conflict_a)
1893		  == ALLOCNO_LOOP_TREE_NODE (a));
1894
1895 if (conflict_a == a)
1896 {
1897 /* SPILL_A is a descendant of A. We don't know (and don't need
1898 to know) which cap allocnos have a soft conflict with A.
1899 All we need to do is test whether the soft conflict applies
1900 to the chosen allocation. */
1901 if (ira_hard_reg_set_intersection_p (hregno, ALLOCNO_MODE (a)((a)->mode),
1902 soft_conflict_regs))
1903 ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (spill_a)((spill_a)->might_conflict_with_parent_p) = true;
1904 }
1905 else
1906 {
1907 /* SPILL_A is a descendant of CONFLICT_A, which has a soft conflict
1908 with A. Test whether the soft conflict applies to the current
1909 allocation. */
1910	      ira_assert (ira_soft_conflict (a, conflict_a) == spill_a);
1911 auto conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)((conflict_a)->hard_regno);
1912	      ira_assert (conflict_hregno >= 0);
1913 auto conflict_nregs = hard_regno_nregs (conflict_hregno,
1914 ALLOCNO_MODE (conflict_a)((conflict_a)->mode));
1915 if (hregno + nregs > conflict_hregno
1916 && conflict_hregno + conflict_nregs > hregno)
1917 ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (spill_a)((spill_a)->might_conflict_with_parent_p) = true;
1918 }
1919 }
1920}
1921
1922/* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1923   that the function is called from
1924   `ira_reassign_conflict_allocnos' or `allocno_reload_assign'.  In
1925 this case some allocno data are not defined or updated and we
1926 should not touch these data. The function returns true if we
1927 managed to assign a hard register to the allocno.
1928
1929 To assign a hard register, first of all we calculate all conflict
1930 hard registers which can come from conflicting allocnos with
1931 already assigned hard registers. After that we find first free
1932 hard register with the minimal cost. During hard register cost
1933 calculation we take conflict hard register costs into account to
1934 give a chance for conflicting allocnos to get a better hard
1935 register in the future.
1936
1937 If the best hard register cost is bigger than cost of memory usage
1938 for the allocno, we don't assign a hard register to given allocno
1939 at all.
1940
1941 If we assign a hard register to the allocno, we update costs of the
1942 hard register for allocnos connected by copies to improve a chance
1943 to coalesce insns represented by the copies when we assign hard
1944 registers to the allocnos connected by the copies. */
1945static bool
1946assign_hard_reg (ira_allocno_t a, bool retry_p)
1947{
1948 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
1949 int i, j, hard_regno, best_hard_regno, class_size;
1950 int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
1951 int *a_costs;
1952 enum reg_class aclass;
1953 machine_mode mode;
1954 static int costs[FIRST_PSEUDO_REGISTER76], full_costs[FIRST_PSEUDO_REGISTER76];
1955 int saved_nregs;
1956 enum reg_class rclass;
1957 int add_cost;
1958#ifdef STACK_REGS
1959 bool no_stack_reg_p;
1960#endif
1961 auto_bitmap allocnos_to_spill;
1962 HARD_REG_SET soft_conflict_regs = {};
1963
1964  ira_assert (! ALLOCNO_ASSIGNED_P (a));
1965 get_conflict_and_start_profitable_regs (a, retry_p,
1966 conflicting_regs,
1967 &profitable_hard_regs);
1968 aclass = ALLOCNO_CLASS (a)((a)->aclass);
1969 class_size = ira_class_hard_regs_num(this_target_ira->x_ira_class_hard_regs_num)[aclass];
1970 best_hard_regno = -1;
1971 mem_cost = 0;
1972 memset (costs, 0, sizeof (int) * class_size);
1973 memset (full_costs, 0, sizeof (int) * class_size);
1974#ifdef STACK_REGS
1975 no_stack_reg_p = false;
1976#endif
1977 if (! retry_p)
1978 start_update_cost ();
1979 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a)((a)->updated_memory_cost);
1980
1981 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a)((a)->updated_hard_reg_costs),
1982 aclass, ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs));
1983 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)((a)->updated_hard_reg_costs);
1984#ifdef STACK_REGS
1985 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a)((a)->total_no_stack_reg_p);
1986#endif
1987 cost = ALLOCNO_UPDATED_CLASS_COST (a)((a)->updated_class_cost);
1988 for (i = 0; i < class_size; i++)
1989 if (a_costs != NULLnullptr)
1990 {
1991 costs[i] += a_costs[i];
1992 full_costs[i] += a_costs[i];
1993 }
1994 else
1995 {
1996 costs[i] += cost;
1997 full_costs[i] += cost;
1998 }
1999 nwords = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
2000 curr_allocno_process++;
2001 for (word = 0; word < nwords; word++)
2002 {
2003 ira_object_t conflict_obj;
2004 ira_object_t obj = ALLOCNO_OBJECT (a, word)((a)->objects[word]);
2005 ira_object_conflict_iterator oci;
2006
2007 /* Take preferences of conflicting allocnos into account. */
2008      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2009 {
2010 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj)((conflict_obj)->allocno);
2011 enum reg_class conflict_aclass;
2012 allocno_color_data_t data = ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data));
2013
2014 /* Reload can give another class so we need to check all
2015 allocnos. */
2016 if (!retry_p
2017 && ((!ALLOCNO_ASSIGNED_P (conflict_a)((conflict_a)->assigned_p)
2018 || ALLOCNO_HARD_REGNO (conflict_a)((conflict_a)->hard_regno) < 0)
2019 && !(hard_reg_set_intersect_p
2020 (profitable_hard_regs,
2021 ALLOCNO_COLOR_DATA((allocno_color_data_t) ((conflict_a)->add_data))
2022 (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data))->profitable_hard_regs))))
2023 {
2024 /* All conflict allocnos are in consideration bitmap
2025 when retry_p is false. It might change in future and
2026 if it happens the assert will be broken. It means
2027 the code should be modified for the new
2028 assumptions. */
2029	      ira_assert (bitmap_bit_p (consideration_allocno_bitmap,
2030					ALLOCNO_NUM (conflict_a)));
2031 continue;
2032 }
2033 conflict_aclass = ALLOCNO_CLASS (conflict_a)((conflict_a)->aclass);
2034	  ira_assert (ira_reg_classes_intersect_p
2035		      [aclass][conflict_aclass]);
2036 if (ALLOCNO_ASSIGNED_P (conflict_a)((conflict_a)->assigned_p))
2037 {
2038 hard_regno = ALLOCNO_HARD_REGNO (conflict_a)((conflict_a)->hard_regno);
2039 if (hard_regno >= 0
2040 && (ira_hard_reg_set_intersection_p
2041 (hard_regno, ALLOCNO_MODE (conflict_a)((conflict_a)->mode),
2042 reg_class_contents(this_target_hard_regs->x_reg_class_contents)[aclass])))
2043 {
2044 int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a)((conflict_a)->num_objects);
2045 int conflict_nregs;
2046
2047 mode = ALLOCNO_MODE (conflict_a)((conflict_a)->mode);
2048 conflict_nregs = hard_regno_nregs (hard_regno, mode);
2049 auto spill_a = (retry_p
2050 ? nullptr
2051 : ira_soft_conflict (a, conflict_a));
2052 if (spill_a)
2053 {
2054 if (bitmap_set_bit (allocnos_to_spill,
2055 ALLOCNO_NUM (spill_a)((spill_a)->num)))
2056 {
2057 ira_loop_border_costs border_costs (spill_a);
2058 auto cost = border_costs.spill_inside_loop_cost ();
2059 auto note_conflict = [&](int r)
2060 {
2061 SET_HARD_REG_BIT (soft_conflict_regs, r);
2062 auto hri = ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[aclass][r];
2063 if (hri >= 0)
2064 {
2065 costs[hri] += cost;
2066 full_costs[hri] += cost;
2067 }
2068 };
2069 for (int r = hard_regno;
2070 r >= 0 && (int) end_hard_regno (mode, r) > hard_regno;
2071 r--)
2072 note_conflict (r);
2073 for (int r = hard_regno + 1;
2074 r < hard_regno + conflict_nregs;
2075 r++)
2076 note_conflict (r);
2077 }
2078 }
2079 else
2080 {
2081 if (conflict_nregs == n_objects && conflict_nregs > 1)
2082 {
2083 int num = OBJECT_SUBWORD (conflict_obj)((conflict_obj)->subword);
2084
2085 if (REG_WORDS_BIG_ENDIAN0)
2086 SET_HARD_REG_BIT (conflicting_regs[word],
2087 hard_regno + n_objects - num - 1);
2088 else
2089 SET_HARD_REG_BIT (conflicting_regs[word],
2090 hard_regno + num);
2091 }
2092 else
2093 conflicting_regs[word]
2094 |= ira_reg_mode_hard_regset(this_target_ira_int->x_ira_reg_mode_hard_regset)[hard_regno][mode];
2095 if (hard_reg_set_subset_p (profitable_hard_regs,
2096 conflicting_regs[word]))
2097 goto fail;
2098 }
2099 }
2100 }
2101 else if (! retry_p
2102 && ! ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data))->may_be_spilled_p
2103 /* Don't process the conflict allocno twice. */
2104 && (ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data))->last_process
2105 != curr_allocno_process))
2106 {
2107 int k, *conflict_costs;
2108
2109 ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data))->last_process
2110 = curr_allocno_process;
2111 ira_allocate_and_copy_costs
2112 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a)((conflict_a)->updated_conflict_hard_reg_costs),
2113 conflict_aclass,
2114 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a)((conflict_a)->conflict_hard_reg_costs));
2115 conflict_costs
2116 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a)((conflict_a)->updated_conflict_hard_reg_costs);
2117 if (conflict_costs != NULLnullptr)
2118 for (j = class_size - 1; j >= 0; j--)
2119 {
2120 hard_regno = ira_class_hard_regs(this_target_ira->x_ira_class_hard_regs)[aclass][j];
2121		ira_assert (hard_regno >= 0);
2122 k = ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[conflict_aclass][hard_regno];
2123 if (k < 0
2124 /* If HARD_REGNO is not available for CONFLICT_A,
2125 the conflict would be ignored, since HARD_REGNO
2126 will never be assigned to CONFLICT_A. */
2127 || !TEST_HARD_REG_BIT (data->profitable_hard_regs,
2128 hard_regno))
2129 continue;
2130 full_costs[j] -= conflict_costs[k];
2131 }
2132 queue_update_cost (conflict_a, conflict_a, NULLnullptr, COST_HOP_DIVISOR4);
2133 }
2134 }
2135 }
2136 if (! retry_p)
2137 /* Take into account preferences of allocnos connected by copies to
2138 the conflict allocnos. */
2139 update_conflict_hard_regno_costs (full_costs, aclass, true);
2140
2141 /* Take preferences of allocnos connected by copies into
2142 account. */
2143 if (! retry_p)
2144 {
2145 start_update_cost ();
2146 queue_update_cost (a, a, NULLnullptr, COST_HOP_DIVISOR4);
2147 update_conflict_hard_regno_costs (full_costs, aclass, false);
2148 }
2149 min_cost = min_full_cost = INT_MAX2147483647;
2150  /* We don't care about giving callee saved registers to allocnos not
2151 living through calls because call clobbered registers are
2152 allocated first (it is usual practice to put them first in
2153 REG_ALLOC_ORDER). */
2154 mode = ALLOCNO_MODE (a)((a)->mode);
2155 for (i = 0; i < class_size; i++)
2156 {
2157 hard_regno = ira_class_hard_regs(this_target_ira->x_ira_class_hard_regs)[aclass][i];
2158#ifdef STACK_REGS
2159 if (no_stack_reg_p
2160 && FIRST_STACK_REG8 <= hard_regno && hard_regno <= LAST_STACK_REG15)
2161 continue;
2162#endif
2163 if (! check_hard_reg_p (a, hard_regno,
2164 conflicting_regs, profitable_hard_regs))
2165 continue;
2166 cost = costs[i];
2167 full_cost = full_costs[i];
2168 if (!HONOR_REG_ALLOC_ORDER0)
2169 {
2170 if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
2171 /* We need to save/restore the hard register in
2172 epilogue/prologue. Therefore we increase the cost. */
2173 {
2174 rclass = REGNO_REG_CLASS (hard_regno)(regclass_map[(hard_regno)]);
2175 add_cost = ((ira_memory_move_cost(this_target_ira->x_ira_memory_move_cost)[mode][rclass][0]
2176 + ira_memory_move_cost(this_target_ira->x_ira_memory_move_cost)[mode][rclass][1])
2177 * saved_nregs / hard_regno_nregs (hard_regno,
2178 mode) - 1);
2179 cost += add_cost;
2180 full_cost += add_cost;
2181 }
2182 }
2183 if (min_cost > cost)
2184 min_cost = cost;
2185 if (min_full_cost > full_cost)
2186 {
2187 min_full_cost = full_cost;
2188 best_hard_regno = hard_regno;
2189	  ira_assert (hard_regno >= 0);
2190 }
2191 if (internal_flag_ira_verbose > 5 && ira_dump_file != NULLnullptr)
2192 fprintf (ira_dump_file, "(%d=%d,%d) ", hard_regno, cost, full_cost);
2193 }
2194 if (internal_flag_ira_verbose > 5 && ira_dump_file != NULLnullptr)
2195 fprintf (ira_dump_file, "\n");
2196 if (min_full_cost > mem_cost
2197 /* Do not spill static chain pointer pseudo when non-local goto
2198 is used. */
2199 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)((a)->regno)))
2200 {
2201 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2202 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
2203 mem_cost, min_full_cost);
2204 best_hard_regno = -1;
2205 }
2206 fail:
2207 if (best_hard_regno >= 0)
2208 {
2209 for (i = hard_regno_nregs (best_hard_regno, mode) - 1; i >= 0; i--)
2210 allocated_hardreg_p[best_hard_regno + i] = true;
2211 spill_soft_conflicts (a, allocnos_to_spill, soft_conflict_regs,
2212 best_hard_regno);
2213 }
2214 if (! retry_p)
2215 restore_costs_from_copies (a);
2216 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = best_hard_regno;
2217 ALLOCNO_ASSIGNED_P (a)((a)->assigned_p) = true;
2218 if (best_hard_regno >= 0 && !retry_p)
2219 update_costs_from_copies (a, true, true);
2220  ira_assert (ALLOCNO_CLASS (a) == aclass);
2221 /* We don't need updated costs anymore. */
2222 ira_free_allocno_updated_costs (a);
2223 return best_hard_regno >= 0;
2224}
2225
2226
2227
2228/* An array used to sort copies. */
2229static ira_copy_t *sorted_copies;
2230
2231/* If allocno A is a cap, return non-cap allocno from which A is
2232 created. Otherwise, return A. */
2233static ira_allocno_t
2234get_cap_member (ira_allocno_t a)
2235{
2236 ira_allocno_t member;
2237
2238 while ((member = ALLOCNO_CAP_MEMBER (a)((a)->cap_member)) != NULLnullptr)
2239 a = member;
2240 return a;
2241}
2242
2243/* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
2244   used to find a conflict for new allocnos or allocnos with
2245   different allocno classes. */
2246static bool
2247allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
2248{
2249 rtx reg1, reg2;
2250 int i, j;
2251 int n1 = ALLOCNO_NUM_OBJECTS (a1)((a1)->num_objects);
2252 int n2 = ALLOCNO_NUM_OBJECTS (a2)((a2)->num_objects);
2253
2254 if (a1 == a2)
2255 return false;
2256 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)((a1)->regno)];
2257 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)((a2)->regno)];
2258  if (reg1 != NULL && reg2 != NULL
2259      && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
2260 return false;
2261
2262 /* We don't keep live ranges for caps because they can be quite big.
2263 Use ranges of non-cap allocno from which caps are created. */
2264 a1 = get_cap_member (a1);
2265 a2 = get_cap_member (a2);
2266 for (i = 0; i < n1; i++)
2267 {
2268 ira_object_t c1 = ALLOCNO_OBJECT (a1, i)((a1)->objects[i]);
2269
2270 for (j = 0; j < n2; j++)
2271 {
2272 ira_object_t c2 = ALLOCNO_OBJECT (a2, j)((a2)->objects[j]);
2273
2274 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1)((c1)->live_ranges),
2275 OBJECT_LIVE_RANGES (c2)((c2)->live_ranges)))
2276 return true;
2277 }
2278 }
2279 return false;
2280}
2281
2282/* The function is used to sort copies according to their execution
2283 frequencies. */
2284static int
2285copy_freq_compare_func (const void *v1p, const void *v2p)
2286{
2287 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
2288 int pri1, pri2;
2289
2290 pri1 = cp1->freq;
2291 pri2 = cp2->freq;
2292 if (pri2 - pri1)
2293 return pri2 - pri1;
2294
2295 /* If frequencies are equal, sort by copies, so that the results of
2296 qsort leave nothing to chance. */
2297 return cp1->num - cp2->num;
2298}
2299
2300
2301
2302/* Return true if any allocno from the thread of A1 conflicts with any
2303   allocno from the thread of A2. */
2304static bool
2305allocno_thread_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
2306{
2307 ira_allocno_t a, conflict_a;
2308
2309 for (a = ALLOCNO_COLOR_DATA (a2)((allocno_color_data_t) ((a2)->add_data))->next_thread_allocno;;
2310 a = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_thread_allocno)
2311 {
2312 for (conflict_a = ALLOCNO_COLOR_DATA (a1)((allocno_color_data_t) ((a1)->add_data))->next_thread_allocno;;
2313 conflict_a = ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data))->next_thread_allocno)
2314 {
2315 if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
2316 return true;
2317 if (conflict_a == a1)
2318 break;
2319 }
2320 if (a == a2)
2321 break;
2322 }
2323 return false;
2324}
2325
2326/* Merge two threads given correspondingly by their first allocnos T1
2327 and T2 (more accurately merging T2 into T1). */
2328static void
2329merge_threads (ira_allocno_t t1, ira_allocno_t t2)
2330{
2331 ira_allocno_t a, next, last;
2332
2333  gcc_assert (t1 != t2
2334	      && ALLOCNO_COLOR_DATA (t1)->first_thread_allocno == t1
2335	      && ALLOCNO_COLOR_DATA (t2)->first_thread_allocno == t2);
2336 for (last = t2, a = ALLOCNO_COLOR_DATA (t2)((allocno_color_data_t) ((t2)->add_data))->next_thread_allocno;;
2337 a = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_thread_allocno)
2338 {
2339 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->first_thread_allocno = t1;
2340 if (a == t2)
2341 break;
2342 last = a;
2343 }
2344 next = ALLOCNO_COLOR_DATA (t1)((allocno_color_data_t) ((t1)->add_data))->next_thread_allocno;
2345 ALLOCNO_COLOR_DATA (t1)((allocno_color_data_t) ((t1)->add_data))->next_thread_allocno = t2;
2346 ALLOCNO_COLOR_DATA (last)((allocno_color_data_t) ((last)->add_data))->next_thread_allocno = next;
2347 ALLOCNO_COLOR_DATA (t1)((allocno_color_data_t) ((t1)->add_data))->thread_freq += ALLOCNO_COLOR_DATA (t2)((allocno_color_data_t) ((t2)->add_data))->thread_freq;
2348}
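/* Threads are circular lists linked through next_thread_allocno.  A sketch
   of the splice above, for hypothetical threads T1 = (t1 a b) and
   T2 = (t2 c):

     before:  t1 -> a -> b -> t1        t2 -> c -> t2
     after:   t1 -> t2 -> c -> a -> b -> t1

   Every member of T2 now names T1 as its first_thread_allocno and the thread
   frequencies are summed.  */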
2349
2350/* Create threads by processing CP_NUM copies from sorted copies. We
2351 process the most expensive copies first. */
2352static void
2353form_threads_from_copies (int cp_num)
2354{
2355 ira_allocno_t a, thread1, thread2;
2356 ira_copy_t cp;
2357
2358  qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
2359 /* Form threads processing copies, most frequently executed
2360 first. */
2361 for (int i = 0; i < cp_num; i++)
2362 {
2363 cp = sorted_copies[i];
2364 thread1 = ALLOCNO_COLOR_DATA (cp->first)((allocno_color_data_t) ((cp->first)->add_data))->first_thread_allocno;
2365 thread2 = ALLOCNO_COLOR_DATA (cp->second)((allocno_color_data_t) ((cp->second)->add_data))->first_thread_allocno;
2366 if (thread1 == thread2)
2367 continue;
2368 if (! allocno_thread_conflict_p (thread1, thread2))
2369 {
2370 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2371 fprintf
2372 (ira_dump_file,
2373 " Forming thread by copy %d:a%dr%d-a%dr%d (freq=%d):\n",
2374 cp->num, ALLOCNO_NUM (cp->first)((cp->first)->num), ALLOCNO_REGNO (cp->first)((cp->first)->regno),
2375 ALLOCNO_NUM (cp->second)((cp->second)->num), ALLOCNO_REGNO (cp->second)((cp->second)->regno),
2376 cp->freq);
2377 merge_threads (thread1, thread2);
2378 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2379 {
2380 thread1 = ALLOCNO_COLOR_DATA (thread1)((allocno_color_data_t) ((thread1)->add_data))->first_thread_allocno;
2381 fprintf (ira_dump_file, " Result (freq=%d): a%dr%d(%d)",
2382 ALLOCNO_COLOR_DATA (thread1)((allocno_color_data_t) ((thread1)->add_data))->thread_freq,
2383 ALLOCNO_NUM (thread1)((thread1)->num), ALLOCNO_REGNO (thread1)((thread1)->regno),
2384 ALLOCNO_FREQ (thread1)((thread1)->freq));
2385 for (a = ALLOCNO_COLOR_DATA (thread1)((allocno_color_data_t) ((thread1)->add_data))->next_thread_allocno;
2386 a != thread1;
2387 a = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_thread_allocno)
2388 fprintf (ira_dump_file, " a%dr%d(%d)",
2389 ALLOCNO_NUM (a)((a)->num), ALLOCNO_REGNO (a)((a)->regno),
2390 ALLOCNO_FREQ (a)((a)->freq));
2391 fprintf (ira_dump_file, "\n");
2392 }
2393 }
2394 }
2395}
2396
2397/* Create threads by processing copies of all allocnos from BUCKET.  We
2398 process the most expensive copies first. */
2399static void
2400form_threads_from_bucket (ira_allocno_t bucket)
2401{
2402 ira_allocno_t a;
2403 ira_copy_t cp, next_cp;
2404 int cp_num = 0;
2405
2406 for (a = bucket; a != NULLnullptr; a = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_bucket_allocno)
2407 {
2408 for (cp = ALLOCNO_COPIES (a)((a)->allocno_copies); cp != NULLnullptr; cp = next_cp)
2409 {
2410 if (cp->first == a)
2411 {
2412 next_cp = cp->next_first_allocno_copy;
2413 sorted_copies[cp_num++] = cp;
2414 }
2415 else if (cp->second == a)
2416 next_cp = cp->next_second_allocno_copy;
2417 else
2418	    gcc_unreachable ();
2419 }
2420 }
2421 form_threads_from_copies (cp_num);
2422}
2423
2424/* Create threads by processing copies of colorable allocno A. We
2425   process the most expensive copies first. */
2426static void
2427form_threads_from_colorable_allocno (ira_allocno_t a)
2428{
2429 ira_allocno_t another_a;
2430 ira_copy_t cp, next_cp;
2431 int cp_num = 0;
2432
2433 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2434 fprintf (ira_dump_file, " Forming thread from allocno a%dr%d:\n",
2435 ALLOCNO_NUM (a)((a)->num), ALLOCNO_REGNO (a)((a)->regno));
2436 for (cp = ALLOCNO_COPIES (a)((a)->allocno_copies); cp != NULLnullptr; cp = next_cp)
2437 {
2438 if (cp->first == a)
2439 {
2440 next_cp = cp->next_first_allocno_copy;
2441 another_a = cp->second;
2442 }
2443 else if (cp->second == a)
2444 {
2445 next_cp = cp->next_second_allocno_copy;
2446 another_a = cp->first;
2447 }
2448 else
2449	gcc_unreachable ();
2450 if ((! ALLOCNO_COLOR_DATA (another_a)((allocno_color_data_t) ((another_a)->add_data))->in_graph_p
2451 && !ALLOCNO_COLOR_DATA (another_a)((allocno_color_data_t) ((another_a)->add_data))->may_be_spilled_p)
2452 || ALLOCNO_COLOR_DATA (another_a)((allocno_color_data_t) ((another_a)->add_data))->colorable_p)
2453 sorted_copies[cp_num++] = cp;
2454 }
2455 form_threads_from_copies (cp_num);
2456}
2457
2458/* Form initial threads which contain only one allocno. */
2459static void
2460init_allocno_threads (void)
2461{
2462 ira_allocno_t a;
2463 unsigned int j;
2464 bitmap_iterator bi;
2465 ira_pref_t pref;
2466
2467  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2468 {
2469 a = ira_allocnos[j];
2470 /* Set up initial thread data: */
2471 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->first_thread_allocno
2472 = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_thread_allocno = a;
2473 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->thread_freq = ALLOCNO_FREQ (a)((a)->freq);
2474 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->hard_reg_prefs = 0;
2475 for (pref = ALLOCNO_PREFS (a)((a)->allocno_prefs); pref != NULLnullptr; pref = pref->next_pref)
2476 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->hard_reg_prefs += pref->freq;
2477 }
2478}
2479
2480
2481
2482/* This page contains the allocator based on the Chaitin-Briggs algorithm. */
2483
2484/* Bucket of allocnos that can be colored currently without spilling. */
2485static ira_allocno_t colorable_allocno_bucket;
2486
2487/* Bucket of allocnos that might not be colorable currently without
2488 spilling. */
2489static ira_allocno_t uncolorable_allocno_bucket;
2490
2491/* The current number of allocnos in the uncolorable_bucket. */
2492static int uncolorable_allocnos_num;
2493
2494/* Return the current spill priority of allocno A.  The smaller the
2495   number, the more preferable the allocno is for spilling. */
2496static inline int
2497allocno_spill_priority (ira_allocno_t a)
2498{
2499 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data));
2500
2501 return (data->temp
2502 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)((a)->excess_pressure_points_num)
2503 * ira_reg_class_max_nregs(this_target_ira->x_ira_reg_class_max_nregs)[ALLOCNO_CLASS (a)((a)->aclass)][ALLOCNO_MODE (a)((a)->mode)]
2504 + 1));
2505}
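/* Worked example with hypothetical values: if DATA->temp (a spill cost
   computed by the caller) is 600, the allocno covers 3 excess-pressure
   points and needs 2 hard registers of its class, the priority is
   600 / (3 * 2 + 1) = 85.  Allocnos that are cheap to spill yet relieve
   register pressure at many points therefore get small values and are
   preferred for spilling.  */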
2506
2507/* Add allocno A to bucket *BUCKET_PTR.  A should not be in a bucket
2508 before the call. */
2509static void
2510add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
2511{
2512 ira_allocno_t first_a;
2513 allocno_color_data_t data;
2514
2515 if (bucket_ptr == &uncolorable_allocno_bucket
2516 && ALLOCNO_CLASS (a)((a)->aclass) != NO_REGS)
2517 {
2518 uncolorable_allocnos_num++;
2519      ira_assert (uncolorable_allocnos_num > 0);
2520 }
2521 first_a = *bucket_ptr;
2522 data = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data));
2523 data->next_bucket_allocno = first_a;
2524 data->prev_bucket_allocno = NULLnullptr;
2525 if (first_a != NULLnullptr)
2526 ALLOCNO_COLOR_DATA (first_a)((allocno_color_data_t) ((first_a)->add_data))->prev_bucket_allocno = a;
2527 *bucket_ptr = a;
2528}
2529
2530/* Compare two allocnos to define which allocno should be pushed first
2531 into the coloring stack. If the return is a negative number, the
2532 allocno given by the first parameter will be pushed first. In this
2533 case such allocno has less priority than the second one and the
2534 hard register will be assigned to it after assignment to the second
2535 one. As the result of such assignment order, the second allocno
2536 has a better chance to get the best hard register. */
2537static int
2538bucket_allocno_compare_func (const void *v1p, const void *v2p)
2539{
2540 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2541 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2542 int diff, freq1, freq2, a1_num, a2_num, pref1, pref2;
2543 ira_allocno_t t1 = ALLOCNO_COLOR_DATA (a1)((allocno_color_data_t) ((a1)->add_data))->first_thread_allocno;
2544 ira_allocno_t t2 = ALLOCNO_COLOR_DATA (a2)((allocno_color_data_t) ((a2)->add_data))->first_thread_allocno;
2545 int cl1 = ALLOCNO_CLASS (a1)((a1)->aclass), cl2 = ALLOCNO_CLASS (a2)((a2)->aclass);
2546
2547 freq1 = ALLOCNO_COLOR_DATA (t1)((allocno_color_data_t) ((t1)->add_data))->thread_freq;
2548 freq2 = ALLOCNO_COLOR_DATA (t2)((allocno_color_data_t) ((t2)->add_data))->thread_freq;
2549 if ((diff = freq1 - freq2) != 0)
2550 return diff;
2551
2552 if ((diff = ALLOCNO_NUM (t2)((t2)->num) - ALLOCNO_NUM (t1)((t1)->num)) != 0)
2553 return diff;
2554
2555 /* Push pseudos requiring fewer hard registers first.  It means that
2556 we will assign pseudos requiring more hard registers first,
2557 avoiding the creation of small holes in the free hard register file
2558 into which the pseudos requiring more hard registers cannot fit. */
2559 if ((diff = (ira_reg_class_max_nregs(this_target_ira->x_ira_reg_class_max_nregs)[cl1][ALLOCNO_MODE (a1)((a1)->mode)]
2560 - ira_reg_class_max_nregs(this_target_ira->x_ira_reg_class_max_nregs)[cl2][ALLOCNO_MODE (a2)((a2)->mode)])) != 0)
2561 return diff;
2562
2563 freq1 = ALLOCNO_FREQ (a1)((a1)->freq);
2564 freq2 = ALLOCNO_FREQ (a2)((a2)->freq);
2565 if ((diff = freq1 - freq2) != 0)
2566 return diff;
2567
2568 a1_num = ALLOCNO_COLOR_DATA (a1)((allocno_color_data_t) ((a1)->add_data))->available_regs_num;
2569 a2_num = ALLOCNO_COLOR_DATA (a2)((allocno_color_data_t) ((a2)->add_data))->available_regs_num;
2570 if ((diff = a2_num - a1_num) != 0)
2571 return diff;
2572 /* Push allocnos with minimal conflict_allocno_hard_prefs first. */
2573 pref1 = ALLOCNO_COLOR_DATA (a1)((allocno_color_data_t) ((a1)->add_data))->conflict_allocno_hard_prefs;
2574 pref2 = ALLOCNO_COLOR_DATA (a2)((allocno_color_data_t) ((a2)->add_data))->conflict_allocno_hard_prefs;
2575 if ((diff = pref1 - pref2) != 0)
2576 return diff;
2577 return ALLOCNO_NUM (a2)((a2)->num) - ALLOCNO_NUM (a1)((a1)->num);
2578}
2579
2580/* Sort bucket *BUCKET_PTR and return the result through
2581 BUCKET_PTR. */
2582static void
2583sort_bucket (ira_allocno_t *bucket_ptr,
2584 int (*compare_func) (const void *, const void *))
2585{
2586 ira_allocno_t a, head;
2587 int n;
2588
2589 for (n = 0, a = *bucket_ptr;
2590 a != NULLnullptr;
2591 a = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_bucket_allocno)
2592 sorted_allocnos[n++] = a;
2593 if (n <= 1)
2594 return;
2595 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
2596 head = NULLnullptr;
2597 for (n--; n >= 0; n--)
2598 {
2599 a = sorted_allocnos[n];
2600 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_bucket_allocno = head;
2601 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->prev_bucket_allocno = NULLnullptr;
2602 if (head != NULLnullptr)
2603 ALLOCNO_COLOR_DATA (head)((allocno_color_data_t) ((head)->add_data))->prev_bucket_allocno = a;
2604 head = a;
2605 }
2606 *bucket_ptr = head;
2607}
2608
2609/* Add ALLOCNO to the colorable bucket, maintaining the order according
2610 to priority.  ALLOCNO should not be in a bucket before the
2611 call. */
2612static void
2613add_allocno_to_ordered_colorable_bucket (ira_allocno_t allocno)
2614{
2615 ira_allocno_t before, after;
2616
2617 form_threads_from_colorable_allocno (allocno);
2618 for (before = colorable_allocno_bucket, after = NULLnullptr;
2619 before != NULLnullptr;
2620 after = before,
2621 before = ALLOCNO_COLOR_DATA (before)((allocno_color_data_t) ((before)->add_data))->next_bucket_allocno)
2622 if (bucket_allocno_compare_func (&allocno, &before) < 0)
2623 break;
2624 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->next_bucket_allocno = before;
2625 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->prev_bucket_allocno = after;
2626 if (after == NULLnullptr)
2627 colorable_allocno_bucket = allocno;
2628 else
2629 ALLOCNO_COLOR_DATA (after)((allocno_color_data_t) ((after)->add_data))->next_bucket_allocno = allocno;
2630 if (before != NULLnullptr)
2631 ALLOCNO_COLOR_DATA (before)((allocno_color_data_t) ((before)->add_data))->prev_bucket_allocno = allocno;
2632}
2633
2634/* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
2635 the call. */
2636static void
2637delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
2638{
2639 ira_allocno_t prev_allocno, next_allocno;
2640
2641 if (bucket_ptr == &uncolorable_allocno_bucket
2642 && ALLOCNO_CLASS (allocno)((allocno)->aclass) != NO_REGS)
2643 {
2644 uncolorable_allocnos_num--;
2645 ira_assert (uncolorable_allocnos_num >= 0)((void)(!(uncolorable_allocnos_num >= 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 2645, __FUNCTION__), 0 : 0))
;
2646 }
2647 prev_allocno = ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->prev_bucket_allocno;
2648 next_allocno = ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->next_bucket_allocno;
2649 if (prev_allocno != NULLnullptr)
2650 ALLOCNO_COLOR_DATA (prev_allocno)((allocno_color_data_t) ((prev_allocno)->add_data))->next_bucket_allocno = next_allocno;
2651 else
2652 {
2653 ira_assert (*bucket_ptr == allocno);
2654 *bucket_ptr = next_allocno;
2655 }
2656 if (next_allocno != NULLnullptr)
2657 ALLOCNO_COLOR_DATA (next_allocno)((allocno_color_data_t) ((next_allocno)->add_data))->prev_bucket_allocno = prev_allocno;
2658}
2659
2660/* Put allocno A onto the coloring stack without removing it from its
2661 bucket. Pushing allocno to the coloring stack can result in moving
2662 conflicting allocnos from the uncolorable bucket to the colorable
2663 one. Update conflict_allocno_hard_prefs of the conflicting
2664 allocnos which are not on stack yet. */
2665static void
2666push_allocno_to_stack (ira_allocno_t a)
2667{
2668 enum reg_class aclass;
2669 allocno_color_data_t data, conflict_data;
2670 int size, i, n = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
2671
2672 data = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data));
2673 data->in_graph_p = false;
2674 allocno_stack_vec.safe_push (a);
2675 aclass = ALLOCNO_CLASS (a)((a)->aclass);
2676 if (aclass == NO_REGS)
2677 return;
2678 size = ira_reg_class_max_nregs(this_target_ira->x_ira_reg_class_max_nregs)[aclass][ALLOCNO_MODE (a)((a)->mode)];
2679 if (n > 1)
2680 {
2681 /* We will deal with the subwords individually. */
2682 gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
2683 size = 1;
2684 }
2685 for (i = 0; i < n; i++)
2686 {
2687 ira_object_t obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]);
2688 ira_object_t conflict_obj;
2689 ira_object_conflict_iterator oci;
2690
2691 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2692 {
2693 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj)((conflict_obj)->allocno);
2694 ira_pref_t pref;
2695
2696 conflict_data = ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data));
2697 if (! conflict_data->in_graph_p
2698 || ALLOCNO_ASSIGNED_P (conflict_a)((conflict_a)->assigned_p)
2699 || !(hard_reg_set_intersect_p
2700 (ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->profitable_hard_regs,
2701 conflict_data->profitable_hard_regs)))
2702 continue;
2703 for (pref = ALLOCNO_PREFS (a)((a)->allocno_prefs); pref != NULLnullptr; pref = pref->next_pref)
2704 conflict_data->conflict_allocno_hard_prefs -= pref->freq;
2705 if (conflict_data->colorable_p)
2706 continue;
2707 ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
2708 ALLOCNO_NUM (conflict_a)));
2709 if (update_left_conflict_sizes_p (conflict_a, a, size))
2710 {
2711 delete_allocno_from_bucket
2712 (conflict_a, &uncolorable_allocno_bucket);
2713 add_allocno_to_ordered_colorable_bucket (conflict_a);
2714 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULLnullptr)
2715 {
2716 fprintf (ira_dump_file, " Making");
2717 ira_print_expanded_allocno (conflict_a);
2718 fprintf (ira_dump_file, " colorable\n");
2719 }
2720 }
2721
2722 }
2723 }
2724}
2725
2726/* Put ALLOCNO onto the coloring stack and remove it from its bucket.
2727 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
2728static void
2729remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
2730{
2731 if (colorable_p)
2732 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
2733 else
2734 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
2735 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2736 {
2737 fprintf (ira_dump_file, " Pushing");
2738 ira_print_expanded_allocno (allocno);
2739 if (colorable_p)
2740 fprintf (ira_dump_file, "(cost %d)\n",
2741 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->temp);
2742 else
2743 fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
2744 ALLOCNO_BAD_SPILL_P (allocno)((allocno)->bad_spill_p) ? "bad spill, " : "",
2745 allocno_spill_priority (allocno),
2746 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->temp);
2747 }
2748 if (! colorable_p)
2749 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->may_be_spilled_p = true;
2750 push_allocno_to_stack (allocno);
2751}
2752
2753/* Put all allocnos from colorable bucket onto the coloring stack. */
2754static void
2755push_only_colorable (void)
2756{
2757 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2758 fprintf (ira_dump_file, " Forming thread from colorable bucket:\n");
2759 form_threads_from_bucket (colorable_allocno_bucket);
2760 for (ira_allocno_t a = colorable_allocno_bucket;
2761 a != NULLnullptr;
2762 a = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_bucket_allocno)
2763 update_costs_from_prefs (a);
2764 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
2765 for (;colorable_allocno_bucket != NULLnullptr;)
2766 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
2767}
2768
2769/* Return the frequency of exit edges (if EXIT_P) or entry from/to the
2770 loop given by its LOOP_NODE. */
2771int
2772ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
2773{
2774 int freq, i;
2775 edge_iterator ei;
2776 edge e;
2777
2778 ira_assert (current_loops != NULL && loop_node->loop != NULL
2779 && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
2780 freq = 0;
2781 if (! exit_p)
2782 {
2783 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
2784 if (e->src != loop_node->loop->latch
2785 && (regno < 0
2786 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2787 && bitmap_bit_p (df_get_live_in (e->dest), regno))))
2788 freq += EDGE_FREQUENCY (e)e->count ().to_frequency ((cfun + 0));
2789 }
2790 else
2791 {
2792 auto_vec<edge> edges = get_loop_exit_edges (loop_node->loop);
2793 FOR_EACH_VEC_ELT (edges, i, e)for (i = 0; (edges).iterate ((i), &(e)); ++(i))
2794 if (regno < 0
2795 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2796 && bitmap_bit_p (df_get_live_in (e->dest), regno)))
2797 freq += EDGE_FREQUENCY (e)e->count ().to_frequency ((cfun + 0));
2798 }
2799
2800 return REG_FREQ_FROM_EDGE_FREQ (freq);
2801}
2802
2803/* Construct an object that describes the boundary between A and its
2804 parent allocno. */
2805ira_loop_border_costs::ira_loop_border_costs (ira_allocno_t a)
2806 : m_mode (ALLOCNO_MODE (a)((a)->mode)),
2807 m_class (ALLOCNO_CLASS (a)((a)->aclass)),
2808 m_entry_freq (ira_loop_edge_freq (ALLOCNO_LOOP_TREE_NODE (a)((a)->loop_tree_node),
2809 ALLOCNO_REGNO (a)((a)->regno), false)),
2810 m_exit_freq (ira_loop_edge_freq (ALLOCNO_LOOP_TREE_NODE (a)((a)->loop_tree_node),
2811 ALLOCNO_REGNO (a)((a)->regno), true))
2812{
2813}
2814
2815/* Calculate and return the cost of putting allocno A into memory. */
2816static int
2817calculate_allocno_spill_cost (ira_allocno_t a)
2818{
2819 int regno, cost;
2820 ira_allocno_t parent_allocno;
2821 ira_loop_tree_node_t parent_node, loop_node;
2822
2823 regno = ALLOCNO_REGNO (a)((a)->regno);
2824 cost = ALLOCNO_UPDATED_MEMORY_COST (a)((a)->updated_memory_cost) - ALLOCNO_UPDATED_CLASS_COST (a)((a)->updated_class_cost);
2825 if (ALLOCNO_CAP (a)((a)->cap) != NULLnullptr)
2826 return cost;
2827 loop_node = ALLOCNO_LOOP_TREE_NODE (a)((a)->loop_tree_node);
2828 if ((parent_node = loop_node->parent) == NULLnullptr)
2829 return cost;
2830 if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULLnullptr)
2831 return cost;
2832 ira_loop_border_costs border_costs (a);
2833 if (ALLOCNO_HARD_REGNO (parent_allocno)((parent_allocno)->hard_regno) < 0)
2834 cost -= border_costs.spill_outside_loop_cost ();
2835 else
2836 cost += (border_costs.spill_inside_loop_cost ()
2837 - border_costs.move_between_loops_cost ());
2838 return cost;
2839}
2840
2841/* Used for sorting allocnos for spilling. */
2842static inline int
2843allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
2844{
2845 int pri1, pri2, diff;
2846
2847 /* Avoid spilling static chain pointer pseudo when non-local goto is
2848 used. */
2849 if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a1)((a1)->regno)))
2850 return 1;
2851 else if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a2)((a2)->regno)))
2852 return -1;
2853 if (ALLOCNO_BAD_SPILL_P (a1)((a1)->bad_spill_p) && ! ALLOCNO_BAD_SPILL_P (a2)((a2)->bad_spill_p))
2854 return 1;
2855 if (ALLOCNO_BAD_SPILL_P (a2)((a2)->bad_spill_p) && ! ALLOCNO_BAD_SPILL_P (a1)((a1)->bad_spill_p))
2856 return -1;
2857 pri1 = allocno_spill_priority (a1);
2858 pri2 = allocno_spill_priority (a2);
2859 if ((diff = pri1 - pri2) != 0)
2860 return diff;
2861 if ((diff
2862 = ALLOCNO_COLOR_DATA (a1)((allocno_color_data_t) ((a1)->add_data))->temp - ALLOCNO_COLOR_DATA (a2)((allocno_color_data_t) ((a2)->add_data))->temp) != 0)
2863 return diff;
2864 return ALLOCNO_NUM (a1)((a1)->num) - ALLOCNO_NUM (a2)((a2)->num);
2865}
2866
2867/* Used for sorting allocnos for spilling. */
2868static int
2869allocno_spill_sort_compare (const void *v1p, const void *v2p)
2870{
2871 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2872 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2873
2874 return allocno_spill_priority_compare (p1, p2);
2875}
2876
2877/* Push allocnos to the coloring stack. The order of allocnos in the
2878 stack defines the order for the subsequent coloring. */
2879static void
2880push_allocnos_to_stack (void)
2881{
2882 ira_allocno_t a;
2883 int cost;
2884
2885 /* Calculate uncolorable allocno spill costs. */
2886 for (a = uncolorable_allocno_bucket;
2887 a != NULLnullptr;
2888 a = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->next_bucket_allocno)
2889 if (ALLOCNO_CLASS (a)((a)->aclass) != NO_REGS)
2890 {
2891 cost = calculate_allocno_spill_cost (a);
2892 /* ??? Remove cost of copies between the coalesced
2893 allocnos. */
2894 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->temp = cost;
2895 }
2896 sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
2897 for (;;)
2898 {
2899 push_only_colorable ();
2900 a = uncolorable_allocno_bucket;
2901 if (a == NULLnullptr)
2902 break;
2903 remove_allocno_from_bucket_and_push (a, false);
2904 }
2905 ira_assert (colorable_allocno_bucket == NULL
2906 && uncolorable_allocno_bucket == NULL);
2907 ira_assert (uncolorable_allocnos_num == 0);
2908}
2909
2910/* Pop the coloring stack and assign hard registers to the popped
2911 allocnos. */
2912static void
2913pop_allocnos_from_stack (void)
2914{
2915 ira_allocno_t allocno;
2916 enum reg_class aclass;
2917
2918 for (;allocno_stack_vec.length () != 0;)
2919 {
2920 allocno = allocno_stack_vec.pop ();
2921 aclass = ALLOCNO_CLASS (allocno)((allocno)->aclass);
2922 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2923 {
2924 fprintf (ira_dump_file, " Popping");
2925 ira_print_expanded_allocno (allocno);
2926 fprintf (ira_dump_file, " -- ");
2927 }
2928 if (aclass == NO_REGS)
2929 {
2930 ALLOCNO_HARD_REGNO (allocno)((allocno)->hard_regno) = -1;
2931 ALLOCNO_ASSIGNED_P (allocno)((allocno)->assigned_p) = true;
2932 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
2933 ira_assert
2934 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
2935 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2936 fprintf (ira_dump_file, "assign memory\n");
2937 }
2938 else if (assign_hard_reg (allocno, false))
2939 {
2940 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2941 fprintf (ira_dump_file, " assign reg %d\n",
2942 ALLOCNO_HARD_REGNO (allocno)((allocno)->hard_regno));
2943 }
2944 else if (ALLOCNO_ASSIGNED_P (allocno)((allocno)->assigned_p))
2945 {
2946 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
2947 fprintf (ira_dump_file, "spill%s\n",
2948 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->may_be_spilled_p
2949 ? "" : "!");
2950 }
2951 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->in_graph_p = true;
2952 }
2953}
2954
2955/* Set up number of available hard registers for allocno A. */
2956static void
2957setup_allocno_available_regs_num (ira_allocno_t a)
2958{
2959 int i, n, hard_regno, hard_regs_num, nwords;
2960 enum reg_class aclass;
2961 allocno_color_data_t data;
2962
2963 aclass = ALLOCNO_CLASS (a)((a)->aclass);
2964 data = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data));
2965 data->available_regs_num = 0;
2966 if (aclass == NO_REGS)
2967 return;
2968 hard_regs_num = ira_class_hard_regs_num(this_target_ira->x_ira_class_hard_regs_num)[aclass];
2969 nwords = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
2970 for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
2971 {
2972 hard_regno = ira_class_hard_regs(this_target_ira->x_ira_class_hard_regs)[aclass][i];
2973 /* Checking only profitable hard regs. */
2974 if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
2975 n++;
2976 }
2977 data->available_regs_num = n;
2978 if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULLnullptr)
2979 return;
2980 fprintf
2981 (ira_dump_file,
2982 " Allocno a%dr%d of %s(%d) has %d avail. regs ",
2983 ALLOCNO_NUM (a)((a)->num), ALLOCNO_REGNO (a)((a)->regno),
2984 reg_class_names[aclass], ira_class_hard_regs_num(this_target_ira->x_ira_class_hard_regs_num)[aclass], n);
2985 print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
2986 fprintf (ira_dump_file, ", %snode: ",
2987 data->profitable_hard_regs == data->hard_regs_node->hard_regs->set
2988 ? "" : "^");
2989 print_hard_reg_set (ira_dump_file,
2990 data->hard_regs_node->hard_regs->set, false);
2991 for (i = 0; i < nwords; i++)
2992 {
2993 ira_object_t obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]);
2994
2995 if (nwords != 1)
2996 {
2997 if (i != 0)
2998 fprintf (ira_dump_file, ", ");
2999 fprintf (ira_dump_file, " obj %d", i);
3000 }
3001 fprintf (ira_dump_file, " (confl regs = ");
3002 print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)((obj)->total_conflict_hard_regs),
3003 false);
3004 fprintf (ira_dump_file, ")");
3005 }
3006 fprintf (ira_dump_file, "\n");
3007}
3008
3009/* Put ALLOCNO in a bucket corresponding to the number and size of its
3010 conflicting allocnos and hard registers. */
3011static void
3012put_allocno_into_bucket (ira_allocno_t allocno)
3013{
3014 ALLOCNO_COLOR_DATA (allocno)((allocno_color_data_t) ((allocno)->add_data))->in_graph_p = true;
3015 setup_allocno_available_regs_num (allocno);
3016 if (setup_left_conflict_sizes_p (allocno))
3017 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
3018 else
3019 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
3020}
3021
3022/* Map: allocno number -> allocno priority. */
3023static int *allocno_priorities;
3024
3025/* Set up priorities for N allocnos in array
3026 CONSIDERATION_ALLOCNOS. */
3027static void
3028setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
3029{
3030 int i, length, nrefs, priority, max_priority, mult, diff;
3031 ira_allocno_t a;
3032
3033 max_priority = 0;
3034 for (i = 0; i < n; i++)
3035 {
3036 a = consideration_allocnos[i];
3037 nrefs = ALLOCNO_NREFS (a)((a)->nrefs);
3038 ira_assert (nrefs >= 0);
3039 mult = floor_log2 (ALLOCNO_NREFS (a)((a)->nrefs)) + 1;
3040 ira_assert (mult >= 0);
3041 mult *= ira_reg_class_max_nregs(this_target_ira->x_ira_reg_class_max_nregs)[ALLOCNO_CLASS (a)((a)->aclass)][ALLOCNO_MODE (a)((a)->mode)];
3042 diff = ALLOCNO_MEMORY_COST (a)((a)->memory_cost) - ALLOCNO_CLASS_COST (a)((a)->class_cost);
3043#ifdef __has_builtin
3044#if __has_builtin(__builtin_smul_overflow)
3045#define HAS_SMUL_OVERFLOW
3046#endif
3047#endif
3048 /* Multiplication can overflow for very large functions.
3049 Check the overflow and constrain the result if necessary: */
3050#ifdef HAS_SMUL_OVERFLOW
3051 if (__builtin_smul_overflow (mult, diff, &priority)
3052 || priority < -INT_MAX2147483647)
3053 priority = diff >= 0 ? INT_MAX2147483647 : -INT_MAX2147483647;
3054#else
3055 static_assert
3056 (sizeof (long long) >= 2 * sizeof (int),
3057 "overflow code does not work for such int and long long sizes");
3058 long long priorityll = (long long) mult * diff;
3059 if (priorityll < -INT_MAX2147483647 || priorityll > INT_MAX2147483647)
3060 priority = diff >= 0 ? INT_MAX2147483647 : -INT_MAX2147483647;
3061 else
3062 priority = priorityll;
3063#endif
3064 allocno_priorities[ALLOCNO_NUM (a)((a)->num)] = priority;
3065 if (priority < 0)
3066 priority = -priority;
3067 if (max_priority < priority)
3068 max_priority = priority;
3069 }
3070 mult = max_priority == 0 ? 1 : INT_MAX2147483647 / max_priority;
3071 for (i = 0; i < n; i++)
3072 {
3073 a = consideration_allocnos[i];
3074 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)((a)->excess_pressure_points_num);
3075 if (ALLOCNO_NUM_OBJECTS (a)((a)->num_objects) > 1)
3076 length /= ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
3077 if (length <= 0)
3078 length = 1;
3079 allocno_priorities[ALLOCNO_NUM (a)((a)->num)]
3080 = allocno_priorities[ALLOCNO_NUM (a)((a)->num)] * mult / length;
3081 }
3082}
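The overflow handling at source lines 3043-3063 can be read in isolation.  Below is a minimal standalone sketch (not part of ira-color.cc) of the same saturating multiplication, assuming only <limits.h> and, where available, __builtin_smul_overflow:

#include <limits.h>

/* Multiply MULT by DIFF, clamping the result to [-INT_MAX, INT_MAX]
   instead of overflowing, mirroring the logic used for allocno
   priorities above.  */
static int
saturating_mul (int mult, int diff)
{
#ifdef __has_builtin
#if __has_builtin(__builtin_smul_overflow)
  int priority;
  if (__builtin_smul_overflow (mult, diff, &priority) || priority < -INT_MAX)
    return diff >= 0 ? INT_MAX : -INT_MAX;
  return priority;
#endif
#endif
  long long priorityll = (long long) mult * diff;
  if (priorityll < -INT_MAX || priorityll > INT_MAX)
    return diff >= 0 ? INT_MAX : -INT_MAX;
  return (int) priorityll;
}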
3083
3084/* Sort allocnos according to the profit of using a hard register
3085 instead of memory for them. */
3086static int
3087allocno_cost_compare_func (const void *v1p, const void *v2p)
3088{
3089 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
3090 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
3091 int c1, c2;
3092
3093 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1)((p1)->updated_memory_cost) - ALLOCNO_UPDATED_CLASS_COST (p1)((p1)->updated_class_cost);
3094 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2)((p2)->updated_memory_cost) - ALLOCNO_UPDATED_CLASS_COST (p2)((p2)->updated_class_cost);
3095 if (c1 - c2)
3096 return c1 - c2;
3097
3098 /* If regs are equally good, sort by allocno numbers, so that the
3099 results of qsort leave nothing to chance. */
3100 return ALLOCNO_NUM (p1)((p1)->num) - ALLOCNO_NUM (p2)((p2)->num);
3101}
3102
3103/* Return savings on removed copies when ALLOCNO is assigned to
3104 HARD_REGNO. */
3105static int
3106allocno_copy_cost_saving (ira_allocno_t allocno, int hard_regno)
3107{
3108 int cost = 0;
3109 machine_mode allocno_mode = ALLOCNO_MODE (allocno)((allocno)->mode);
3110 enum reg_class rclass;
3111 ira_copy_t cp, next_cp;
3112
3113 rclass = REGNO_REG_CLASS (hard_regno)(regclass_map[(hard_regno)]);
3114 if (ira_reg_class_max_nregs(this_target_ira->x_ira_reg_class_max_nregs)[rclass][allocno_mode]
3115 > ira_class_hard_regs_num(this_target_ira->x_ira_class_hard_regs_num)[rclass])
3116 /* For the above condition the cost can be wrong. Use the allocno
3117 class in this case. */
3118 rclass = ALLOCNO_CLASS (allocno)((allocno)->aclass);
3119 for (cp = ALLOCNO_COPIES (allocno)((allocno)->allocno_copies); cp != NULLnullptr; cp = next_cp)
3120 {
3121 if (cp->first == allocno)
3122 {
3123 next_cp = cp->next_first_allocno_copy;
3124 if (ALLOCNO_HARD_REGNO (cp->second)((cp->second)->hard_regno) != hard_regno)
3125 continue;
3126 }
3127 else if (cp->second == allocno)
3128 {
3129 next_cp = cp->next_second_allocno_copy;
3130 if (ALLOCNO_HARD_REGNO (cp->first)((cp->first)->hard_regno) != hard_regno)
3131 continue;
3132 }
3133 else
3134 gcc_unreachable ();
3135 ira_init_register_move_cost_if_necessary (allocno_mode);
3136 cost += cp->freq * ira_register_move_cost(this_target_ira_int->x_ira_register_move_cost)[allocno_mode][rclass][rclass];
3137 }
3138 return cost;
3139}
3140
3141/* We used Chaitin-Briggs coloring to assign as many pseudos as
3142 possible to hard registers.  Let us try to improve the allocation
3143 from a cost point of view.  This function improves the allocation by
3144 spilling some allocnos and assigning the freed hard registers to
3145 other allocnos if it decreases the overall allocation cost. */
3146static void
3147improve_allocation (void)
3148{
3149 unsigned int i;
3150 int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
3151 int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
3152 bool try_p;
3153 enum reg_class aclass;
3154 machine_mode mode;
3155 int *allocno_costs;
3156 int costs[FIRST_PSEUDO_REGISTER76];
3157 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
3158 ira_allocno_t a;
3159 bitmap_iterator bi;
3160
3161 /* Don't bother to optimize the code with static chain pointer and
3162 non-local goto in order not to spill the chain pointer
3163 pseudo. */
3164 if (cfun(cfun + 0)->static_chain_decl && crtl(&x_rtl)->has_nonlocal_goto)
3165 return;
3166 /* Clear counts used to process conflicting allocnos only once for
3167 each allocno. */
3168 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3169 ALLOCNO_COLOR_DATA (ira_allocnos[i])((allocno_color_data_t) ((ira_allocnos[i])->add_data))->temp = 0;
3170 check = n = 0;
3171 /* Process each allocno and try to assign a hard register to it by
3172 spilling some of its conflicting allocnos. */
3173 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3174 {
3175 a = ira_allocnos[i];
3176 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->temp = 0;
3177 if (empty_profitable_hard_regs (a))
3178 continue;
3179 check++;
3180 aclass = ALLOCNO_CLASS (a)((a)->aclass);
3181 allocno_costs = ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs);
3182 if ((hregno = ALLOCNO_HARD_REGNO (a)((a)->hard_regno)) < 0)
3183 base_cost = ALLOCNO_UPDATED_MEMORY_COST (a)((a)->updated_memory_cost);
3184 else if (allocno_costs == NULLnullptr)
3185 /* It means that assigning a hard register is not profitable
3186 (we don't waste memory for hard register costs in this
3187 case). */
3188 continue;
3189 else
3190 base_cost = (allocno_costs[ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[aclass][hregno]]
3191 - allocno_copy_cost_saving (a, hregno));
3192 try_p = false;
3193 get_conflict_and_start_profitable_regs (a, false,
3194 conflicting_regs,
3195 &profitable_hard_regs);
3196 class_size = ira_class_hard_regs_num(this_target_ira->x_ira_class_hard_regs_num)[aclass];
3197 /* Set up cost improvement for usage of each profitable hard
3198 register for allocno A. */
3199 for (j = 0; j < class_size; j++)
3200 {
3201 hregno = ira_class_hard_regs(this_target_ira->x_ira_class_hard_regs)[aclass][j];
3202 if (! check_hard_reg_p (a, hregno,
3203 conflicting_regs, profitable_hard_regs))
3204 continue;
3205 ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
3206 k = allocno_costs == NULLnullptr ? 0 : j;
3207 costs[hregno] = (allocno_costs == NULLnullptr
3208 ? ALLOCNO_UPDATED_CLASS_COST (a)((a)->updated_class_cost) : allocno_costs[k]);
3209 costs[hregno] -= allocno_copy_cost_saving (a, hregno);
3210 costs[hregno] -= base_cost;
3211 if (costs[hregno] < 0)
3212 try_p = true;
3213 }
3214 if (! try_p)
3215 /* There is no chance to improve the allocation cost by
3216 assigning hard register to allocno A even without spilling
3217 conflicting allocnos. */
3218 continue;
3219 auto_bitmap allocnos_to_spill;
3220 HARD_REG_SET soft_conflict_regs = {};
3221 mode = ALLOCNO_MODE (a)((a)->mode);
3222 nwords = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
3223 /* Process each allocno conflicting with A and update the cost
3224 improvement for profitable hard registers of A. To use a
3225 hard register for A we need to spill some conflicting
3226 allocnos and that creates penalty for the cost
3227 improvement. */
3228 for (word = 0; word < nwords; word++)
3229 {
3230 ira_object_t conflict_obj;
3231 ira_object_t obj = ALLOCNO_OBJECT (a, word)((a)->objects[word]);
3232 ira_object_conflict_iterator oci;
3233
3234 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
3235 {
3236 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj)((conflict_obj)->allocno);
3237
3238 if (ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data))->temp == check)
3239 /* We already processed this conflicting allocno
3240 because we processed earlier another object of the
3241 conflicting allocno. */
3242 continue;
3243 ALLOCNO_COLOR_DATA (conflict_a)((allocno_color_data_t) ((conflict_a)->add_data))->temp = check;
3244 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)((conflict_a)->hard_regno)) < 0)
3245 continue;
3246 auto spill_a = ira_soft_conflict (a, conflict_a);
3247 if (spill_a)
3248 {
3249 if (!bitmap_set_bit (allocnos_to_spill,
3250 ALLOCNO_NUM (spill_a)((spill_a)->num)))
3251 continue;
3252 ira_loop_border_costs border_costs (spill_a);
3253 spill_cost = border_costs.spill_inside_loop_cost ();
3254 }
3255 else
3256 {
3257 spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a)((conflict_a)->updated_memory_cost);
3258 k = (ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)
3259 [ALLOCNO_CLASS (conflict_a)((conflict_a)->aclass)][conflict_hregno]);
3260 ira_assert (k >= 0);
3261 if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a)((conflict_a)->hard_reg_costs))
3262 != NULLnullptr)
3263 spill_cost -= allocno_costs[k];
3264 else
3265 spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a)((conflict_a)->updated_class_cost);
3266 spill_cost
3267 += allocno_copy_cost_saving (conflict_a, conflict_hregno);
3268 }
3269 conflict_nregs = hard_regno_nregs (conflict_hregno,
3270 ALLOCNO_MODE (conflict_a)((conflict_a)->mode));
3271 auto note_conflict = [&](int r)
3272 {
3273 if (check_hard_reg_p (a, r,
3274 conflicting_regs, profitable_hard_regs))
3275 {
3276 if (spill_a)
3277 SET_HARD_REG_BIT (soft_conflict_regs, r);
3278 costs[r] += spill_cost;
3279 }
3280 };
3281 for (r = conflict_hregno;
3282 r >= 0 && (int) end_hard_regno (mode, r) > conflict_hregno;
3283 r--)
3284 note_conflict (r);
3285 for (r = conflict_hregno + 1;
3286 r < conflict_hregno + conflict_nregs;
3287 r++)
3288 note_conflict (r);
3289 }
3290 }
3291 min_cost = INT_MAX2147483647;
3292 best = -1;
3293 /* Now we choose hard register for A which results in highest
3294 allocation cost improvement. */
3295 for (j = 0; j < class_size; j++)
3296 {
3297 hregno = ira_class_hard_regs(this_target_ira->x_ira_class_hard_regs)[aclass][j];
3298 if (check_hard_reg_p (a, hregno,
3299 conflicting_regs, profitable_hard_regs)
3300 && min_cost > costs[hregno])
3301 {
3302 best = hregno;
3303 min_cost = costs[hregno];
3304 }
3305 }
3306 if (min_cost >= 0)
3307 /* We are in a situation when assigning any hard register to A
3308 by spilling some conflicting allocnos does not improve the
3309 allocation cost. */
3310 continue;
3311 spill_soft_conflicts (a, allocnos_to_spill, soft_conflict_regs, best);
3312 nregs = hard_regno_nregs (best, mode);
3313 /* Now spill conflicting allocnos which contain a hard register
3314 of A when we assign the best chosen hard register to it. */
3315 for (word = 0; word < nwords; word++)
3316 {
3317 ira_object_t conflict_obj;
3318 ira_object_t obj = ALLOCNO_OBJECT (a, word)((a)->objects[word]);
3319 ira_object_conflict_iterator oci;
3320
3321 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
3322 {
3323 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj)((conflict_obj)->allocno);
3324
3325 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)((conflict_a)->hard_regno)) < 0)
3326 continue;
3327 conflict_nregs = hard_regno_nregs (conflict_hregno,
3328 ALLOCNO_MODE (conflict_a)((conflict_a)->mode));
3329 if (best + nregs <= conflict_hregno
3330 || conflict_hregno + conflict_nregs <= best)
3331 /* No intersection. */
3332 continue;
3333 ALLOCNO_HARD_REGNO (conflict_a)((conflict_a)->hard_regno) = -1;
3334 sorted_allocnos[n++] = conflict_a;
3335 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULLnullptr)
3336 fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
3337 ALLOCNO_NUM (conflict_a)((conflict_a)->num), ALLOCNO_REGNO (conflict_a)((conflict_a)->regno),
3338 ALLOCNO_NUM (a)((a)->num), ALLOCNO_REGNO (a)((a)->regno));
3339 }
3340 }
3341 /* Assign the best chosen hard register to A. */
3342 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = best;
3343 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULLnullptr)
3344 fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
3345 best, ALLOCNO_NUM (a)((a)->num), ALLOCNO_REGNO (a)((a)->regno));
3346 }
3347 if (n == 0)
3348 return;
3349 /* We spilled some allocnos to assign their hard registers to other
3350 allocnos. The spilled allocnos are now in array
3351 'sorted_allocnos'. There is still a possibility that some of the
3352 spilled allocnos can get hard registers.  So let us try to assign
3353 them hard registers again (just a reminder -- function
3354 'assign_hard_reg' assigns hard registers only if it is possible
3355 and profitable).  We process the spilled allocnos with the biggest
3356 benefit to get a hard register first -- see function
3357 'allocno_cost_compare_func'. */
3358 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
3359 allocno_cost_compare_func);
3360 for (j = 0; j < n; j++)
3361 {
3362 a = sorted_allocnos[j];
3363 ALLOCNO_ASSIGNED_P (a)((a)->assigned_p) = false;
3364 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3365 {
3366 fprintf (ira_dump_file, " ");
3367 ira_print_expanded_allocno (a);
3368 fprintf (ira_dump_file, " -- ");
3369 }
3370 if (assign_hard_reg (a, false))
3371 {
3372 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3373 fprintf (ira_dump_file, "assign hard reg %d\n",
3374 ALLOCNO_HARD_REGNO (a)((a)->hard_regno));
3375 }
3376 else
3377 {
3378 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3379 fprintf (ira_dump_file, "assign memory\n");
3380 }
3381 }
3382}
3383
3384/* Sort allocnos according to their priorities. */
3385static int
3386allocno_priority_compare_func (const void *v1p, const void *v2p)
3387{
3388 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
3389 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
3390 int pri1, pri2, diff;
3391
3392 /* Assign hard reg to static chain pointer pseudo first when
3393 non-local goto is used. */
3394 if ((diff = (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a2)((a2)->regno))
3395 - non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a1)((a1)->regno)))) != 0)
3396 return diff;
3397 pri1 = allocno_priorities[ALLOCNO_NUM (a1)((a1)->num)];
3398 pri2 = allocno_priorities[ALLOCNO_NUM (a2)((a2)->num)];
3399 if (pri2 != pri1)
3400 return SORTGT (pri2, pri1)(((pri2) > (pri1)) ? 1 : -1);
3401
3402 /* If regs are equally good, sort by allocnos, so that the results of
3403 qsort leave nothing to chance. */
3404 return ALLOCNO_NUM (a1)((a1)->num) - ALLOCNO_NUM (a2)((a2)->num);
3405}
3406
3407/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
3408 taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
3409static void
3410color_allocnos (void)
3411{
3412 unsigned int i, n;
3413 bitmap_iterator bi;
3414 ira_allocno_t a;
3415
3416 setup_profitable_hard_regs ();
3417 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3418 {
3419 allocno_color_data_t data;
3420 ira_pref_t pref, next_pref;
3421
3422 a = ira_allocnos[i];
3423 data = ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data));
3424 data->conflict_allocno_hard_prefs = 0;
3425 for (pref = ALLOCNO_PREFS (a)((a)->allocno_prefs); pref != NULLnullptr; pref = next_pref)
3426 {
3427 next_pref = pref->next_pref;
3428 if (! ira_hard_reg_in_set_p (pref->hard_regno,
3429 ALLOCNO_MODE (a)((a)->mode),
3430 data->profitable_hard_regs))
3431 ira_remove_pref (pref);
3432 }
3433 }
3434
3435 if (flag_ira_algorithmglobal_options.x_flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
3436 {
3437 n = 0;
3438 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3439 {
3440 a = ira_allocnos[i];
3441 if (ALLOCNO_CLASS (a)((a)->aclass) == NO_REGS)
3442 {
3443 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = -1;
3444 ALLOCNO_ASSIGNED_P (a)((a)->assigned_p) = true;
3445 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3446 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3447 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3448 {
3449 fprintf (ira_dump_file, " Spill");
3450 ira_print_expanded_allocno (a);
3451 fprintf (ira_dump_file, "\n");
3452 }
3453 continue;
3454 }
3455 sorted_allocnos[n++] = a;
3456 }
3457 if (n != 0)
3458 {
3459 setup_allocno_priorities (sorted_allocnos, n);
3460 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
3461 allocno_priority_compare_func);
3462 for (i = 0; i < n; i++)
3463 {
3464 a = sorted_allocnos[i];
3465 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3466 {
3467 fprintf (ira_dump_file, " ");
3468 ira_print_expanded_allocno (a);
3469 fprintf (ira_dump_file, " -- ");
3470 }
3471 if (assign_hard_reg (a, false))
3472 {
3473 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3474 fprintf (ira_dump_file, "assign hard reg %d\n",
3475 ALLOCNO_HARD_REGNO (a)((a)->hard_regno));
3476 }
3477 else
3478 {
3479 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3480 fprintf (ira_dump_file, "assign memory\n");
3481 }
3482 }
3483 }
3484 }
3485 else
3486 {
3487 form_allocno_hard_regs_nodes_forest ();
3488 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULLnullptr)
3489 print_hard_regs_forest (ira_dump_file);
3490 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3491 {
3492 a = ira_allocnos[i];
3493 if (ALLOCNO_CLASS (a)((a)->aclass) != NO_REGS && ! empty_profitable_hard_regs (a))
3494 {
3495 ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->in_graph_p = true;
3496 update_conflict_allocno_hard_prefs (a);
3497 }
3498 else
3499 {
3500 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = -1;
3501 ALLOCNO_ASSIGNED_P (a)((a)->assigned_p) = true;
3502 /* We don't need updated costs anymore. */
3503 ira_free_allocno_updated_costs (a);
3504 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3505 {
3506 fprintf (ira_dump_file, " Spill");
3507 ira_print_expanded_allocno (a);
3508 fprintf (ira_dump_file, "\n");
3509 }
3510 }
3511 }
3512 /* Put the allocnos into the corresponding buckets. */
3513 colorable_allocno_bucket = NULLnullptr;
3514 uncolorable_allocno_bucket = NULLnullptr;
3515 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3516 {
3517 a = ira_allocnos[i];
3518 if (ALLOCNO_COLOR_DATA (a)((allocno_color_data_t) ((a)->add_data))->in_graph_p)
3519 put_allocno_into_bucket (a);
3520 }
3521 push_allocnos_to_stack ();
3522 pop_allocnos_from_stack ();
3523 finish_allocno_hard_regs_nodes_forest ();
3524 }
3525 improve_allocation ();
3526}
3527
3528
3529
3530/* Output information about the loop given by its LOOP_TREE_NODE. */
3531static void
3532print_loop_title (ira_loop_tree_node_t loop_tree_node)
3533{
3534 unsigned int j;
3535 bitmap_iterator bi;
3536 ira_loop_tree_node_t subloop_node, dest_loop_node;
3537 edge e;
3538 edge_iterator ei;
3539
3540 if (loop_tree_node->parent == NULLnullptr)
3541 fprintf (ira_dump_file,
3542 "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
3543 NUM_FIXED_BLOCKS(2));
3544 else
3545 {
3546 ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
3547 fprintf (ira_dump_file,
3548 "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
3549 loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
3550 loop_tree_node->loop->header->index,
3551 loop_depth (loop_tree_node->loop));
3552 }
3553 for (subloop_node = loop_tree_node->children;
3554 subloop_node != NULLnullptr;
3555 subloop_node = subloop_node->next)
3556 if (subloop_node->bb != NULLnullptr)
3557 {
3558 fprintf (ira_dump_file, " %d", subloop_node->bb->index);
3559 FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
3560 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3561 && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
3562 != loop_tree_node))
3563 fprintf (ira_dump_file, "(->%d:l%d)",
3564 e->dest->index, dest_loop_node->loop_num);
3565 }
3566 fprintf (ira_dump_file, "\n all:");
3567 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
3568 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j])((ira_allocnos[j])->regno));
3569 fprintf (ira_dump_file, "\n modified regnos:");
3570 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
3571 fprintf (ira_dump_file, " %d", j);
3572 fprintf (ira_dump_file, "\n border:");
3573 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
3574 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j])((ira_allocnos[j])->regno));
3575 fprintf (ira_dump_file, "\n Pressure:");
3576 for (j = 0; (int) j < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); j++)
3577 {
3578 enum reg_class pclass;
3579
3580 pclass = ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[j];
3581 if (loop_tree_node->reg_pressure[pclass] == 0)
3582 continue;
3583 fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
3584 loop_tree_node->reg_pressure[pclass]);
3585 }
3586 fprintf (ira_dump_file, "\n");
3587}
3588
3589/* Color the allocnos inside the loop (in the extreme case it can be
3590 the whole function) given the corresponding LOOP_TREE_NODE.  The
3591 function is called for each loop during a top-down traversal of the
3592 loop tree. */
3593static void
3594color_pass (ira_loop_tree_node_t loop_tree_node)
3595{
3596 int regno, hard_regno, index = -1, n;
3597 int cost;
3598 unsigned int j;
3599 bitmap_iterator bi;
3600 machine_mode mode;
3601 enum reg_class rclass, aclass;
3602 ira_allocno_t a, subloop_allocno;
3603 ira_loop_tree_node_t subloop_node;
3604
3605 ira_assert (loop_tree_node->bb == NULL);
3606 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULLnullptr)
3607 print_loop_title (loop_tree_node);
3608
3609 bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
3610 bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
3611 n = 0;
3612 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3613 {
3614 a = ira_allocnos[j];
3615 n++;
3616 if (! ALLOCNO_ASSIGNED_P (a)((a)->assigned_p))
3617 continue;
3618 bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a)((a)->num));
3619 }
3620 allocno_color_data
3621 = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
3622 * n);
3623 memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
3624 curr_allocno_process = 0;
3625 n = 0;
3626 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3627 {
3628 a = ira_allocnos[j];
3629 ALLOCNO_ADD_DATA (a)((a)->add_data) = allocno_color_data + n;
3630 n++;
3631 }
3632 init_allocno_threads ();
3633 /* Color all mentioned allocnos including transparent ones. */
3634 color_allocnos ();
3635 /* Process caps. They are processed just once. */
3636 if (flag_ira_regionglobal_options.x_flag_ira_region == IRA_REGION_MIXED
3637 || flag_ira_regionglobal_options.x_flag_ira_region == IRA_REGION_ALL)
3638 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
3639 {
3640 a = ira_allocnos[j];
3641 if (ALLOCNO_CAP_MEMBER (a)((a)->cap_member) == NULLnullptr)
3642 continue;
3643 /* Remove from processing in the next loop. */
3644 bitmap_clear_bit (consideration_allocno_bitmap, j);
3645 rclass = ALLOCNO_CLASS (a)((a)->aclass);
3646 subloop_allocno = ALLOCNO_CAP_MEMBER (a)((a)->cap_member);
3647 subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno)((subloop_allocno)->loop_tree_node);
Value stored to 'subloop_node' is never read
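The flagged store is dead: 'subloop_node' receives ALLOCNO_LOOP_TREE_NODE (subloop_allocno) at line 3647 but is not read again before the loop at line 3669 overwrites it with loop_tree_node->subloops.  A minimal cleanup sketch (not applied here, and assuming the initializer has no needed side effects) would simply drop the assignment at line 3647:

/* subloop_node is first used only after being reassigned in the loop
   at line 3669, so the assignment at line 3647 can be removed.  */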
3648 if (ira_single_region_allocno_p (a, subloop_allocno))
3649 {
3650 mode = ALLOCNO_MODE (a)((a)->mode);
3651 hard_regno = ALLOCNO_HARD_REGNO (a)((a)->hard_regno);
3652 if (hard_regno >= 0)
3653 {
3654 index = ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[rclass][hard_regno];
3655 ira_assert (index >= 0);
3656 }
3657 regno = ALLOCNO_REGNO (a)((a)->regno);
3658 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
3659 ALLOCNO_HARD_REGNO (subloop_allocno)((subloop_allocno)->hard_regno) = hard_regno;
3660 ALLOCNO_ASSIGNED_P (subloop_allocno)((subloop_allocno)->assigned_p) = true;
3661 if (hard_regno >= 0)
3662 update_costs_from_copies (subloop_allocno, true, true);
3663 /* We don't need updated costs anymore. */
3664 ira_free_allocno_updated_costs (subloop_allocno);
3665 }
3666 }
3667 /* Update costs of the corresponding allocnos (not caps) in the
3668 subloops. */
3669 for (subloop_node = loop_tree_node->subloops;
3670 subloop_node != NULLnullptr;
3671 subloop_node = subloop_node->subloop_next)
3672 {
3673 ira_assert (subloop_node->bb == NULL);
3674 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3675 {
3676 a = ira_allocnos[j];
3677 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3678 mode = ALLOCNO_MODE (a)((a)->mode);
3679 rclass = ALLOCNO_CLASS (a)((a)->aclass);
3680 hard_regno = ALLOCNO_HARD_REGNO (a)((a)->hard_regno);
3681 /* Use hard register class here. ??? */
3682 if (hard_regno >= 0)
3683 {
3684 index = ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[rclass][hard_regno];
3685 ira_assert (index >= 0);
3686 }
3687 regno = ALLOCNO_REGNO (a)((a)->regno);
3688 /* ??? conflict costs */
3689 subloop_allocno = subloop_node->regno_allocno_map[regno];
3690 if (subloop_allocno == NULLnullptr
3691 || ALLOCNO_CAP (subloop_allocno)((subloop_allocno)->cap) != NULLnullptr)
3692 continue;
3693 ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
3694 ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
3695 ALLOCNO_NUM (subloop_allocno)));
3696 if (ira_single_region_allocno_p (a, subloop_allocno)
3697 || !ira_subloop_allocnos_can_differ_p (a, hard_regno >= 0,
3698 false))
3699 {
3700 gcc_assert (!ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P
3701 (subloop_allocno));
3702 if (! ALLOCNO_ASSIGNED_P (subloop_allocno)((subloop_allocno)->assigned_p))
3703 {
3704 ALLOCNO_HARD_REGNO (subloop_allocno)((subloop_allocno)->hard_regno) = hard_regno;
3705 ALLOCNO_ASSIGNED_P (subloop_allocno)((subloop_allocno)->assigned_p) = true;
3706 if (hard_regno >= 0)
3707 update_costs_from_copies (subloop_allocno, true, true);
3708 /* We don't need updated costs anymore. */
3709 ira_free_allocno_updated_costs (subloop_allocno);
3710 }
3711 }
3712 else if (hard_regno < 0)
3713 {
3714 /* If we allocate a register to SUBLOOP_ALLOCNO, we'll need
3715 to load the register on entry to the subloop and store
3716 the register back on exit from the subloop. This incurs
3717 a fixed cost for all registers. Since UPDATED_MEMORY_COST
3718 is (and should only be) used relative to the register costs
3719 for the same allocno, we can subtract this shared register
3720 cost from the memory cost. */
3721 ira_loop_border_costs border_costs (subloop_allocno);
3722 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)((subloop_allocno)->updated_memory_cost)
3723 -= border_costs.spill_outside_loop_cost ();
3724 }
3725 else
3726 {
3727 ira_loop_border_costs border_costs (subloop_allocno);
3728 aclass = ALLOCNO_CLASS (subloop_allocno)((subloop_allocno)->aclass);
3729 ira_init_register_move_cost_if_necessary (mode);
3730 cost = border_costs.move_between_loops_cost ();
3731 ira_allocate_and_set_or_copy_costs
3732 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->updated_hard_reg_costs), aclass,
3733 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)((subloop_allocno)->updated_class_cost),
3734 ALLOCNO_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->hard_reg_costs));
3735 ira_allocate_and_set_or_copy_costs
3736 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->updated_conflict_hard_reg_costs),
3737 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->conflict_hard_reg_costs));
3738 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->updated_hard_reg_costs)[index] -= cost;
3739 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->updated_conflict_hard_reg_costs)[index]
3740 -= cost;
3741 if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)((subloop_allocno)->updated_class_cost)
3742 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->updated_hard_reg_costs)[index])
3743 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)((subloop_allocno)->updated_class_cost)
3744 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->updated_hard_reg_costs)[index];
3745 /* If we spill SUBLOOP_ALLOCNO, we'll need to store HARD_REGNO
3746 on entry to the subloop and restore HARD_REGNO on exit from
3747 the subloop. */
3748 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)((subloop_allocno)->updated_memory_cost)
3749 += border_costs.spill_inside_loop_cost ();
3750 }
3751 }
3752 }
3753 ira_free (allocno_color_data);
3754 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)for (bmp_iter_set_init (&(bi), (consideration_allocno_bitmap
), (0), &(j)); bmp_iter_set (&(bi), &(j)); bmp_iter_next
(&(bi), &(j)))
3755 {
3756 a = ira_allocnos[j];
3757 ALLOCNO_ADD_DATA (a)((a)->add_data) = NULLnullptr;
3758 }
3759}
3760
3761/* Initialize the common data for coloring and call functions to do
3762 Chaitin-Briggs and regional coloring. */
3763static void
3764do_coloring (void)
3765{
3766 coloring_allocno_bitmap = ira_allocate_bitmap ();
3767 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULLnullptr)
3768 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
3769
3770 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULLnullptr);
3771
3772 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULLnullptr)
3773 ira_print_disposition (ira_dump_file);
3774
3775 ira_free_bitmap (coloring_allocno_bitmap);
3776}
3777
3778
3779
3780/* Move spill/restore code, which is to be generated in ira-emit.cc,
3781 to less frequent points (if it is profitable) by reassigning some
3782 allocnos (in loops that have subloops or are contained in another
3783 loop) to memory, which results in a longer live range during which
3784 the corresponding pseudo-registers will stay in memory. */
3785static void
3786move_spill_restore (void)
3787{
3788 int cost, regno, hard_regno, hard_regno2, index;
3789 bool changed_p;
3790 machine_mode mode;
3791 enum reg_class rclass;
3792 ira_allocno_t a, parent_allocno, subloop_allocno;
3793 ira_loop_tree_node_t parent, loop_node, subloop_node;
3794 ira_allocno_iterator ai;
3795
3796 for (;;)
3797 {
3798 changed_p = false;
3799 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULLnullptr)
3800 fprintf (ira_dump_file, "New iteration of spill/restore move\n");
3801 FOR_EACH_ALLOCNO (a, ai)for (ira_allocno_iter_init (&(ai)); ira_allocno_iter_cond
(&(ai), &(a));)
3802 {
3803 regno = ALLOCNO_REGNO (a)((a)->regno);
3804 loop_node = ALLOCNO_LOOP_TREE_NODE (a)((a)->loop_tree_node);
3805 if (ALLOCNO_CAP_MEMBER (a)((a)->cap_member) != NULLnullptr
3806 || ALLOCNO_CAP (a)((a)->cap) != NULLnullptr
3807 || (hard_regno = ALLOCNO_HARD_REGNO (a)((a)->hard_regno)) < 0
3808 || loop_node->children == NULLnullptr
3809 /* Don't do the optimization because it can create
3810 copies, and the reload pass can then spill the allocno
3811 set up by a copy even though the allocno itself will
3812 not get a memory slot. */
3813 || ira_equiv_no_lvalue_p (regno)
3814 || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)((a)->num))
3815 /* Do not spill static chain pointer pseudo when
3816 non-local goto is used. */
3817 || non_spilled_static_chain_regno_p (regno))
3818 continue;
3819 mode = ALLOCNO_MODE (a)((a)->mode);
3820 rclass = ALLOCNO_CLASS (a)((a)->aclass);
3821 index = ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[rclass][hard_regno];
3822 ira_assert (index >= 0)((void)(!(index >= 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 3822, __FUNCTION__), 0 : 0))
;
3823 cost = (ALLOCNO_MEMORY_COST (a)((a)->memory_cost)
3824 - (ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs) == NULLnullptr
3825 ? ALLOCNO_CLASS_COST (a)((a)->class_cost)
3826 : ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs)[index]));
3827 ira_init_register_move_cost_if_necessary (mode);
3828 for (subloop_node = loop_node->subloops;
3829 subloop_node != NULLnullptr;
3830 subloop_node = subloop_node->subloop_next)
3831 {
3832 ira_assert (subloop_node->bb == NULL)((void)(!(subloop_node->bb == nullptr) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 3832, __FUNCTION__), 0 : 0))
;
3833 subloop_allocno = subloop_node->regno_allocno_map[regno];
3834 if (subloop_allocno == NULLnullptr)
3835 continue;
3836 ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno))((void)(!(rclass == ((subloop_allocno)->aclass)) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 3836, __FUNCTION__), 0 : 0))
;
3837 ira_loop_border_costs border_costs (subloop_allocno);
3838
3839 /* We have accumulated cost. To get the real cost of
3840 allocno usage in the loop we should subtract the costs
3841 added by propagate_allocno_info for the subloop allocnos. */
3842 int reg_cost
3843 = (ALLOCNO_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->hard_reg_costs) == NULLnullptr
3844 ? ALLOCNO_CLASS_COST (subloop_allocno)((subloop_allocno)->class_cost)
3845 : ALLOCNO_HARD_REG_COSTS (subloop_allocno)((subloop_allocno)->hard_reg_costs)[index]);
3846
3847 int spill_cost
3848 = (border_costs.spill_inside_loop_cost ()
3849 + ALLOCNO_MEMORY_COST (subloop_allocno)((subloop_allocno)->memory_cost));
3850
3851 /* If HARD_REGNO conflicts with SUBLOOP_A then
3852 propagate_allocno_info will have propagated
3853 the cost of spilling HARD_REGNO in SUBLOOP_NODE.
3854 (ira_subloop_allocnos_can_differ_p must be true
3855 in that case.) If HARD_REGNO is a caller-saved
3856 register, we might have modelled it in the same way.
3857
3858 Otherwise, SPILL_COST acted as a cap on the propagated
3859 register cost, in cases where the allocations can differ. */
3860 auto conflicts = ira_total_conflict_hard_regs (subloop_allocno);
3861 if (TEST_HARD_REG_BIT (conflicts, hard_regno)
3862 || (ira_need_caller_save_p (subloop_allocno, hard_regno)
3863 && ira_caller_save_loop_spill_p (a, subloop_allocno,
3864 spill_cost)))
3865 reg_cost = spill_cost;
3866 else if (ira_subloop_allocnos_can_differ_p (a))
3867 reg_cost = MIN (reg_cost, spill_cost)((reg_cost) < (spill_cost) ? (reg_cost) : (spill_cost));
3868
3869 cost -= ALLOCNO_MEMORY_COST (subloop_allocno)((subloop_allocno)->memory_cost) - reg_cost;
3870
3871 if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)((subloop_allocno)->hard_regno)) < 0)
3872 /* The register was spilled in the subloop. If we spill
3873 it in the outer loop too then we'll no longer need to
3874 save the register on entry to the subloop and restore
3875 the register on exit from the subloop. */
3876 cost -= border_costs.spill_inside_loop_cost ();
3877 else
3878 {
3879 /* The register was also allocated in the subloop. If we
3880 spill it in the outer loop then we'll need to load the
3881 register on entry to the subloop and store the register
3882 back on exit from the subloop. */
3883 cost += border_costs.spill_outside_loop_cost ();
3884 if (hard_regno2 != hard_regno)
3885 cost -= border_costs.move_between_loops_cost ();
3886 }
3887 }
3888 if ((parent = loop_node->parent) != NULLnullptr
3889 && (parent_allocno = parent->regno_allocno_map[regno]) != NULLnullptr)
3890 {
3891 ira_assert (rclass == ALLOCNO_CLASS (parent_allocno))((void)(!(rclass == ((parent_allocno)->aclass)) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 3891, __FUNCTION__), 0 : 0))
;
3892 ira_loop_border_costs border_costs (a);
3893 if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)((parent_allocno)->hard_regno)) < 0)
3894 /* The register was spilled in the parent loop. If we spill
3895 it in this loop too then we'll no longer need to load the
3896 register on entry to this loop and save the register back
3897 on exit from this loop. */
3898 cost -= border_costs.spill_outside_loop_cost ();
3899 else
3900 {
3901 /* The register was also allocated in the parent loop.
3902 If we spill it in this loop then we'll need to save
3903 the register on entry to this loop and restore the
3904 register on exit from this loop. */
3905 cost += border_costs.spill_inside_loop_cost ();
3906 if (hard_regno2 != hard_regno)
3907 cost -= border_costs.move_between_loops_cost ();
3908 }
3909 }
3910 if (cost < 0)
3911 {
3912 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = -1;
3913 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
3914 {
3915 fprintf
3916 (ira_dump_file,
3917 " Moving spill/restore for a%dr%d up from loop %d",
3918 ALLOCNO_NUM (a)((a)->num), regno, loop_node->loop_num);
3919 fprintf (ira_dump_file, " - profit %d\n", -cost);
3920 }
3921 changed_p = true;
3922 }
3923 }
3924 if (! changed_p)
3925 break;
3926 }
3927}
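
To make the profitability test above easier to follow, here is a minimal standalone sketch (not part of ira-color.cc) of the same decision for a single subloop, with all costs reduced to plain ints. The function name spill_move_gain and every parameter are hypothetical; the real pass derives these values from ALLOCNO_MEMORY_COST, ALLOCNO_HARD_REG_COSTS and ira_loop_border_costs as shown in move_spill_restore, and also accounts for the parent loop and for mismatched hard registers, which this sketch omits.

/* Illustrative sketch only -- not part of ira-color.cc.  All names and
   numbers are hypothetical; the real computation lives in
   move_spill_restore above.  */
#include <cstdio>

/* Gain of spilling an allocno in the outer loop when it has exactly one
   subloop allocno.  A positive result corresponds to the "cost < 0"
   case in move_spill_restore, i.e. moving the spill up is profitable.  */
static int
spill_move_gain (int mem_cost, int reg_cost,          /* outer allocno costs */
                 int sub_mem_cost, int sub_reg_cost,  /* propagated subloop costs */
                 bool sub_spilled_p,                  /* subloop allocno got memory? */
                 int spill_inside, int spill_outside) /* border costs */
{
  int cost = mem_cost - reg_cost;        /* extra cost of keeping the allocno in memory */
  cost -= sub_mem_cost - sub_reg_cost;   /* undo costs propagated from the subloop */
  if (sub_spilled_p)
    /* No save/restore around the subloop is needed anymore.  */
    cost -= spill_inside;
  else
    /* A load on entry and a store on exit of the subloop appear instead.  */
    cost += spill_outside;
  return -cost;
}

int
main ()
{
  /* Hypothetical numbers: the subloop allocno was already spilled and the
     outer allocno is rarely used, so moving the spill up is profitable.  */
  printf ("gain = %d\n", spill_move_gain (10, 40, 100, 20, true, 50, 30));
  return 0;
}
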
3928
3929
3930
3931/* Update current hard reg costs and current conflict hard reg costs
3932 for allocno A. It is done by processing its copies containing
3933 other allocnos already assigned. */
3934static void
3935update_curr_costs (ira_allocno_t a)
3936{
3937 int i, hard_regno, cost;
3938 machine_mode mode;
3939 enum reg_class aclass, rclass;
3940 ira_allocno_t another_a;
3941 ira_copy_t cp, next_cp;
3942
3943 ira_free_allocno_updated_costs (a);
3944 ira_assert (! ALLOCNO_ASSIGNED_P (a))((void)(!(! ((a)->assigned_p)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 3944, __FUNCTION__), 0 : 0))
;
3945 aclass = ALLOCNO_CLASS (a)((a)->aclass);
3946 if (aclass == NO_REGS)
3947 return;
3948 mode = ALLOCNO_MODE (a)((a)->mode);
3949 ira_init_register_move_cost_if_necessary (mode);
3950 for (cp = ALLOCNO_COPIES (a)((a)->allocno_copies); cp != NULLnullptr; cp = next_cp)
3951 {
3952 if (cp->first == a)
3953 {
3954 next_cp = cp->next_first_allocno_copy;
3955 another_a = cp->second;
3956 }
3957 else if (cp->second == a)
3958 {
3959 next_cp = cp->next_second_allocno_copy;
3960 another_a = cp->first;
3961 }
3962 else
3963 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 3963, __FUNCTION__))
;
3964 if (! ira_reg_classes_intersect_p(this_target_ira->x_ira_reg_classes_intersect_p)[aclass][ALLOCNO_CLASS (another_a)((another_a)->aclass)]
3965 || ! ALLOCNO_ASSIGNED_P (another_a)((another_a)->assigned_p)
3966 || (hard_regno = ALLOCNO_HARD_REGNO (another_a)((another_a)->hard_regno)) < 0)
3967 continue;
3968 rclass = REGNO_REG_CLASS (hard_regno)(regclass_map[(hard_regno)]);
3969 i = ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[aclass][hard_regno];
3970 if (i < 0)
3971 continue;
3972 cost = (cp->first == a
3973 ? ira_register_move_cost(this_target_ira_int->x_ira_register_move_cost)[mode][rclass][aclass]
3974 : ira_register_move_cost(this_target_ira_int->x_ira_register_move_cost)[mode][aclass][rclass]);
3975 ira_allocate_and_set_or_copy_costs
3976 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a)((a)->updated_hard_reg_costs), aclass, ALLOCNO_CLASS_COST (a)((a)->class_cost),
3977 ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs));
3978 ira_allocate_and_set_or_copy_costs
3979 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)((a)->updated_conflict_hard_reg_costs),
3980 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a)((a)->conflict_hard_reg_costs));
3981 ALLOCNO_UPDATED_HARD_REG_COSTS (a)((a)->updated_hard_reg_costs)[i] -= cp->freq * cost;
3982 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)((a)->updated_conflict_hard_reg_costs)[i] -= cp->freq * cost;
3983 }
3984}
3985
3986/* Try to assign hard registers to the unassigned allocnos and
3987 allocnos conflicting with them or conflicting with allocnos whose
3988 regno >= START_REGNO. The function is called after ira_flattening,
3989 so more allocnos (including ones created in ira-emit.cc) will have a
3990 chance to get a hard register. We use a simple assignment algorithm
3991 based on priorities. */
3992void
3993ira_reassign_conflict_allocnos (int start_regno)
3994{
3995 int i, allocnos_to_color_num;
3996 ira_allocno_t a;
3997 enum reg_class aclass;
3998 bitmap allocnos_to_color;
3999 ira_allocno_iterator ai;
4000
4001 allocnos_to_color = ira_allocate_bitmap ();
4002 allocnos_to_color_num = 0;
4003 FOR_EACH_ALLOCNO (a, ai)for (ira_allocno_iter_init (&(ai)); ira_allocno_iter_cond
(&(ai), &(a));)
4004 {
4005 int n = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
4006
4007 if (! ALLOCNO_ASSIGNED_P (a)((a)->assigned_p)
4008 && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)((a)->num)))
4009 {
4010 if (ALLOCNO_CLASS (a)((a)->aclass) != NO_REGS)
4011 sorted_allocnos[allocnos_to_color_num++] = a;
4012 else
4013 {
4014 ALLOCNO_ASSIGNED_P (a)((a)->assigned_p) = true;
4015 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = -1;
4016 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL)((void)(!(((a)->updated_hard_reg_costs) == nullptr) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4016, __FUNCTION__), 0 : 0))
;
4017 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL)((void)(!(((a)->updated_conflict_hard_reg_costs) == nullptr
) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4017, __FUNCTION__), 0 : 0))
;
4018 }
4019 bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a)((a)->num));
4020 }
4021 if (ALLOCNO_REGNO (a)((a)->regno) < start_regno
4022 || (aclass = ALLOCNO_CLASS (a)((a)->aclass)) == NO_REGS)
4023 continue;
4024 for (i = 0; i < n; i++)
4025 {
4026 ira_object_t obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]);
4027 ira_object_t conflict_obj;
4028 ira_object_conflict_iterator oci;
4029
4030 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)for (ira_object_conflict_iter_init (&(oci), (obj)); ira_object_conflict_iter_cond
(&(oci), &(conflict_obj));)
4031 {
4032 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj)((conflict_obj)->allocno);
4033
4034 ira_assert (ira_reg_classes_intersect_p((void)(!((this_target_ira->x_ira_reg_classes_intersect_p)
[aclass][((conflict_a)->aclass)]) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4035, __FUNCTION__), 0 : 0))
4035 [aclass][ALLOCNO_CLASS (conflict_a)])((void)(!((this_target_ira->x_ira_reg_classes_intersect_p)
[aclass][((conflict_a)->aclass)]) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4035, __FUNCTION__), 0 : 0))
;
4036 if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)((conflict_a)->num)))
4037 continue;
4038 sorted_allocnos[allocnos_to_color_num++] = conflict_a;
4039 }
4040 }
4041 }
4042 ira_free_bitmap (allocnos_to_color);
4043 if (allocnos_to_color_num > 1)
4044 {
4045 setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
4046 qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),gcc_qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t
), allocno_priority_compare_func)
4047 allocno_priority_compare_func)gcc_qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t
), allocno_priority_compare_func)
;
4048 }
4049 for (i = 0; i < allocnos_to_color_num; i++)
4050 {
4051 a = sorted_allocnos[i];
4052 ALLOCNO_ASSIGNED_P (a)((a)->assigned_p) = false;
4053 update_curr_costs (a);
4054 }
4055 for (i = 0; i < allocnos_to_color_num; i++)
4056 {
4057 a = sorted_allocnos[i];
4058 if (assign_hard_reg (a, true))
4059 {
4060 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4061 fprintf
4062 (ira_dump_file,
4063 " Secondary allocation: assign hard reg %d to reg %d\n",
4064 ALLOCNO_HARD_REGNO (a)((a)->hard_regno), ALLOCNO_REGNO (a)((a)->regno));
4065 }
4066 }
4067}
4068
4069
4070
4071/* This page contains functions used to find conflicts using allocno
4072 live ranges. */
4073
4074#ifdef ENABLE_IRA_CHECKING
4075
4076/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
4077 intersect. This should be used when there is only one region.
4078 Currently this is used during reload. */
4079static bool
4080conflict_by_live_ranges_p (int regno1, int regno2)
4081{
4082 ira_allocno_t a1, a2;
4083
4084 ira_assert (regno1 >= FIRST_PSEUDO_REGISTER((void)(!(regno1 >= 76 && regno2 >= 76) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4085, __FUNCTION__), 0 : 0))
4085 && regno2 >= FIRST_PSEUDO_REGISTER)((void)(!(regno1 >= 76 && regno2 >= 76) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4085, __FUNCTION__), 0 : 0))
;
4086 /* Reg info calculated by dataflow infrastructure can be different
4087 from one calculated by regclass. */
4088 if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULLnullptr
4089 || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULLnullptr)
4090 return false;
4091 return allocnos_conflict_by_live_ranges_p (a1, a2);
4092}
4093
4094#endif
4095
4096
4097
4098/* This page contains code to coalesce memory stack slots used by
4099 spilled allocnos. This results in smaller stack frame, better data
4100 locality, and in smaller code for some architectures like
4101 x86/x86_64 where insn size depends on address displacement value.
4102 On the other hand, it can worsen insn scheduling after the RA but
4103 in practice it is less important than smaller stack frames. */
4104
4105/* TRUE if we coalesced some allocnos. In other words, if we got
4106 loops formed by members first_coalesced_allocno and
4107 next_coalesced_allocno containing more than one allocno. */
4108static bool allocno_coalesced_p;
4109
4110/* Bitmap used to prevent repeated processing of an allocno because
4111 of coalescing. */
4112static bitmap processed_coalesced_allocno_bitmap;
4113
4114/* See below. */
4115typedef struct coalesce_data *coalesce_data_t;
4116
4117/* To decrease the footprint of the ira_allocno structure we store all
4118 data needed only for coalescing in the following structure. */
4119struct coalesce_data
4120{
4121 /* Coalesced allocnos form a cyclic list. One allocno given by
4122 FIRST represents all coalesced allocnos. The
4123 list is chained by NEXT. */
4124 ira_allocno_t first;
4125 ira_allocno_t next;
4126 int temp;
4127};
4128
4129/* Container for storing allocno data concerning coalescing. */
4130static coalesce_data_t allocno_coalesce_data;
4131
4132/* Macro to access the data concerning coalescing. */
4133#define ALLOCNO_COALESCE_DATA(a)((coalesce_data_t) ((a)->add_data)) ((coalesce_data_t) ALLOCNO_ADD_DATA (a)((a)->add_data))
4134
4135/* Merge two sets of coalesced allocnos given respectively by
4136 allocnos A1 and A2 (more precisely, merging the A2 set into the
4137 A1 set). */
4138static void
4139merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
4140{
4141 ira_allocno_t a, first, last, next;
4142
4143 first = ALLOCNO_COALESCE_DATA (a1)((coalesce_data_t) ((a1)->add_data))->first;
4144 a = ALLOCNO_COALESCE_DATA (a2)((coalesce_data_t) ((a2)->add_data))->first;
4145 if (first == a)
4146 return;
4147 for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)((coalesce_data_t) ((a2)->add_data))->next;;
4148 a = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next)
4149 {
4150 ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->first = first;
4151 if (a == a2)
4152 break;
4153 last = a;
4154 }
4155 next = allocno_coalesce_data[ALLOCNO_NUM (first)((first)->num)].next;
4156 allocno_coalesce_data[ALLOCNO_NUM (first)((first)->num)].next = a2;
4157 allocno_coalesce_data[ALLOCNO_NUM (last)((last)->num)].next = next;
4158}
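
As a rough illustration of the splice above, the following standalone sketch (not part of ira-color.cc) merges two circular rings that each carry a "first" representative. The node type and merge_rings are hypothetical, simplified stand-ins for coalesce_data and merge_allocnos.

/* Illustrative sketch only -- not part of ira-color.cc.  */
#include <cstdio>

struct node
{
  int id;
  node *first; /* representative of the ring this node belongs to */
  node *next;  /* next node in the circular list */
};

/* Merge the ring containing B into the ring containing A: retarget the
   FIRST pointers of B's ring, then splice B's ring in right after the
   representative of A's ring, mirroring merge_allocnos above.  */
static void
merge_rings (node *a, node *b)
{
  node *first = a->first;
  if (b->first == first)
    return;  /* already coalesced */
  node *last = b;
  for (node *p = b->next;; p = p->next)
    {
      p->first = first;
      if (p == b)
        break;
      last = p;
    }
  node *next = first->next;
  first->next = b;
  last->next = next;
}

int
main ()
{
  node n[4];
  for (int i = 0; i < 4; i++)
    n[i] = { i, &n[i], &n[i] };  /* four singleton rings */
  merge_rings (&n[0], &n[1]);
  merge_rings (&n[2], &n[3]);
  merge_rings (&n[0], &n[2]);
  /* All four nodes now form one ring represented by n[0].  */
  for (node *p = n[0].next;; p = p->next)
    {
      printf ("node %d, first %d\n", p->id, p->first->id);
      if (p == &n[0])
        break;
    }
  return 0;
}
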
4159
4160/* Return TRUE if there are conflicting allocnos from two sets of
4161 coalesced allocnos given correspondingly by allocnos A1 and A2. We
4162 use live ranges to find conflicts because conflicts are represented
4163 only for allocnos of the same allocno class and during the reload
4164 pass we coalesce allocnos for sharing stack memory slots. */
4165static bool
4166coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
4167{
4168 ira_allocno_t a, conflict_a;
4169
4170 if (allocno_coalesced_p)
4171 {
4172 bitmap_clear (processed_coalesced_allocno_bitmap);
4173 for (a = ALLOCNO_COALESCE_DATA (a1)((coalesce_data_t) ((a1)->add_data))->next;;
4174 a = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next)
4175 {
4176 bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a)((a)->num));
4177 if (a == a1)
4178 break;
4179 }
4180 }
4181 for (a = ALLOCNO_COALESCE_DATA (a2)((coalesce_data_t) ((a2)->add_data))->next;;
4182 a = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next)
4183 {
4184 for (conflict_a = ALLOCNO_COALESCE_DATA (a1)((coalesce_data_t) ((a1)->add_data))->next;;
4185 conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)((coalesce_data_t) ((conflict_a)->add_data))->next)
4186 {
4187 if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
4188 return true;
4189 if (conflict_a == a1)
4190 break;
4191 }
4192 if (a == a2)
4193 break;
4194 }
4195 return false;
4196}
4197
4198/* The major function for aggressive allocno coalescing. We coalesce
4199 only spilled allocnos. If some allocnos have been coalesced, we
4200 set up flag allocno_coalesced_p. */
4201static void
4202coalesce_allocnos (void)
4203{
4204 ira_allocno_t a;
4205 ira_copy_t cp, next_cp;
4206 unsigned int j;
4207 int i, n, cp_num, regno;
4208 bitmap_iterator bi;
4209
4210 cp_num = 0;
4211 /* Collect copies. */
4212 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)for (bmp_iter_set_init (&(bi), (coloring_allocno_bitmap),
(0), &(j)); bmp_iter_set (&(bi), &(j)); bmp_iter_next
(&(bi), &(j)))
4213 {
4214 a = ira_allocnos[j];
4215 regno = ALLOCNO_REGNO (a)((a)->regno);
4216 if (! ALLOCNO_ASSIGNED_P (a)((a)->assigned_p) || ALLOCNO_HARD_REGNO (a)((a)->hard_regno) >= 0
4217 || ira_equiv_no_lvalue_p (regno))
4218 continue;
4219 for (cp = ALLOCNO_COPIES (a)((a)->allocno_copies); cp != NULLnullptr; cp = next_cp)
4220 {
4221 if (cp->first == a)
4222 {
4223 next_cp = cp->next_first_allocno_copy;
4224 regno = ALLOCNO_REGNO (cp->second)((cp->second)->regno);
4225 /* For priority coloring we coalesce allocnos only of
4226 the same allocno class, not of merely intersecting
4227 allocno classes, although that would be possible.
4228 This is done for simplicity. */
4229 if ((cp->insn != NULLnullptr || cp->constraint_p)
4230 && ALLOCNO_ASSIGNED_P (cp->second)((cp->second)->assigned_p)
4231 && ALLOCNO_HARD_REGNO (cp->second)((cp->second)->hard_regno) < 0
4232 && ! ira_equiv_no_lvalue_p (regno))
4233 sorted_copies[cp_num++] = cp;
4234 }
4235 else if (cp->second == a)
4236 next_cp = cp->next_second_allocno_copy;
4237 else
4238 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4238, __FUNCTION__))
;
4239 }
4240 }
4241 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func)gcc_qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func
)
;
4242 /* Coalesce copies, most frequently executed first. */
4243 for (; cp_num != 0;)
4244 {
4245 for (i = 0; i < cp_num; i++)
4246 {
4247 cp = sorted_copies[i];
4248 if (! coalesced_allocno_conflict_p (cp->first, cp->second))
4249 {
4250 allocno_coalesced_p = true;
4251 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4252 fprintf
4253 (ira_dump_file,
4254 " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
4255 cp->num, ALLOCNO_NUM (cp->first)((cp->first)->num), ALLOCNO_REGNO (cp->first)((cp->first)->regno),
4256 ALLOCNO_NUM (cp->second)((cp->second)->num), ALLOCNO_REGNO (cp->second)((cp->second)->regno),
4257 cp->freq);
4258 merge_allocnos (cp->first, cp->second);
4259 i++;
4260 break;
4261 }
4262 }
4263 /* Collect the rest of copies. */
4264 for (n = 0; i < cp_num; i++)
4265 {
4266 cp = sorted_copies[i];
4267 if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)((cp->first)->num)].first
4268 != allocno_coalesce_data[ALLOCNO_NUM (cp->second)((cp->second)->num)].first)
4269 sorted_copies[n++] = cp;
4270 }
4271 cp_num = n;
4272 }
4273}
4274
4275/* Usage cost and order number of the coalesced allocno set to which
4276 a given pseudo register belongs. */
4277static int *regno_coalesced_allocno_cost;
4278static int *regno_coalesced_allocno_num;
4279
4280/* Sort pseudos according to the frequencies of the coalesced allocno
4281 sets they belong to (putting the most frequently used ones first),
4282 and according to coalesced allocno set order numbers. */
4283static int
4284coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
4285{
4286 const int regno1 = *(const int *) v1p;
4287 const int regno2 = *(const int *) v2p;
4288 int diff;
4289
4290 if ((diff = (regno_coalesced_allocno_cost[regno2]
4291 - regno_coalesced_allocno_cost[regno1])) != 0)
4292 return diff;
4293 if ((diff = (regno_coalesced_allocno_num[regno1]
4294 - regno_coalesced_allocno_num[regno2])) != 0)
4295 return diff;
4296 return regno1 - regno2;
4297}
4298
4299/* Widest width in which each pseudo reg is referred to (via subreg).
4300 It is used for sorting pseudo registers. */
4301static machine_mode *regno_max_ref_mode;
4302
4303/* Sort pseudos according to their slot numbers (putting ones with
4304 smaller numbers first, or last when the frame pointer is not
4305 needed). */
4306static int
4307coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
4308{
4309 const int regno1 = *(const int *) v1p;
4310 const int regno2 = *(const int *) v2p;
4311 ira_allocno_t a1 = ira_regno_allocno_map[regno1];
4312 ira_allocno_t a2 = ira_regno_allocno_map[regno2];
4313 int diff, slot_num1, slot_num2;
4314 machine_mode mode1, mode2;
4315
4316 if (a1 == NULLnullptr || ALLOCNO_HARD_REGNO (a1)((a1)->hard_regno) >= 0)
4317 {
4318 if (a2 == NULLnullptr || ALLOCNO_HARD_REGNO (a2)((a2)->hard_regno) >= 0)
4319 return regno1 - regno2;
4320 return 1;
4321 }
4322 else if (a2 == NULLnullptr || ALLOCNO_HARD_REGNO (a2)((a2)->hard_regno) >= 0)
4323 return -1;
4324 slot_num1 = -ALLOCNO_HARD_REGNO (a1)((a1)->hard_regno);
4325 slot_num2 = -ALLOCNO_HARD_REGNO (a2)((a2)->hard_regno);
4326 if ((diff = slot_num1 - slot_num2) != 0)
4327 return (frame_pointer_needed((&x_rtl)->frame_pointer_needed)
4328 || (!FRAME_GROWS_DOWNWARD1) == STACK_GROWS_DOWNWARD1 ? diff : -diff);
4329 mode1 = wider_subreg_mode (PSEUDO_REGNO_MODE (regno1)((machine_mode) (regno_reg_rtx[regno1])->mode),
4330 regno_max_ref_mode[regno1]);
4331 mode2 = wider_subreg_mode (PSEUDO_REGNO_MODE (regno2)((machine_mode) (regno_reg_rtx[regno2])->mode),
4332 regno_max_ref_mode[regno2]);
4333 if ((diff = compare_sizes_for_sort (GET_MODE_SIZE (mode2),
4334 GET_MODE_SIZE (mode1))) != 0)
4335 return diff;
4336 return regno1 - regno2;
4337}
4338
4339/* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
4340 for coalesced allocno sets containing allocnos with their regnos
4341 given in array PSEUDO_REGNOS of length N. */
4342static void
4343setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
4344{
4345 int i, num, regno, cost;
4346 ira_allocno_t allocno, a;
4347
4348 for (num = i = 0; i < n; i++)
4349 {
4350 regno = pseudo_regnos[i];
4351 allocno = ira_regno_allocno_map[regno];
4352 if (allocno == NULLnullptr)
4353 {
4354 regno_coalesced_allocno_cost[regno] = 0;
4355 regno_coalesced_allocno_num[regno] = ++num;
4356 continue;
4357 }
4358 if (ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->first != allocno)
4359 continue;
4360 num++;
4361 for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->next;;
4362 a = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next)
4363 {
4364 cost += ALLOCNO_FREQ (a)((a)->freq);
4365 if (a == allocno)
4366 break;
4367 }
4368 for (a = ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->next;;
4369 a = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next)
4370 {
4371 regno_coalesced_allocno_num[ALLOCNO_REGNO (a)((a)->regno)] = num;
4372 regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)((a)->regno)] = cost;
4373 if (a == allocno)
4374 break;
4375 }
4376 }
4377}
4378
4379/* Collect spilled allocnos representing coalesced allocno sets (the
4380 first coalesced allocno). The collected allocnos are returned
4381 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
4382 number of the collected allocnos. The allocnos are given by their
4383 regnos in array PSEUDO_REGNOS of length N. */
4384static int
4385collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
4386 ira_allocno_t *spilled_coalesced_allocnos)
4387{
4388 int i, num, regno;
4389 ira_allocno_t allocno;
4390
4391 for (num = i = 0; i < n; i++)
4392 {
4393 regno = pseudo_regnos[i];
4394 allocno = ira_regno_allocno_map[regno];
4395 if (allocno == NULLnullptr || ALLOCNO_HARD_REGNO (allocno)((allocno)->hard_regno) >= 0
4396 || ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->first != allocno)
4397 continue;
4398 spilled_coalesced_allocnos[num++] = allocno;
4399 }
4400 return num;
4401}
4402
4403/* Array of live ranges of size IRA_ALLOCNOS_NUM. The live range for
4404 a given slot contains the live ranges of the coalesced allocnos
4405 assigned to that slot. */
4406static live_range_t *slot_coalesced_allocnos_live_ranges;
4407
4408/* Return TRUE if the coalesced allocnos represented by ALLOCNO have
4409 live ranges that intersect the live ranges of coalesced allocnos
4410 assigned to the slot with number N. */
4411static bool
4412slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
4413{
4414 ira_allocno_t a;
4415
4416 for (a = ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->next;;
4417 a = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next)
4418 {
4419 int i;
4420 int nr = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
4421 gcc_assert (ALLOCNO_CAP_MEMBER (a) == NULL)((void)(!(((a)->cap_member) == nullptr) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4421, __FUNCTION__), 0 : 0))
;
4422 for (i = 0; i < nr; i++)
4423 {
4424 ira_object_t obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]);
4425
4426 if (ira_live_ranges_intersect_p
4427 (slot_coalesced_allocnos_live_ranges[n],
4428 OBJECT_LIVE_RANGES (obj)((obj)->live_ranges)))
4429 return true;
4430 }
4431 if (a == allocno)
4432 break;
4433 }
4434 return false;
4435}
4436
4437/* Update the live ranges of the slot to which the coalesced allocnos
4438 represented by ALLOCNO were assigned. */
4439static void
4440setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
4441{
4442 int i, n;
4443 ira_allocno_t a;
4444 live_range_t r;
4445
4446 n = ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->temp;
4447 for (a = ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->next;;
4448 a = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next)
4449 {
4450 int nr = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
4451 gcc_assert (ALLOCNO_CAP_MEMBER (a) == NULL)((void)(!(((a)->cap_member) == nullptr) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4451, __FUNCTION__), 0 : 0))
;
4452 for (i = 0; i < nr; i++)
4453 {
4454 ira_object_t obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]);
4455
4456 r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj)((obj)->live_ranges));
4457 slot_coalesced_allocnos_live_ranges[n]
4458 = ira_merge_live_ranges
4459 (slot_coalesced_allocnos_live_ranges[n], r);
4460 }
4461 if (a == allocno)
4462 break;
4463 }
4464}
4465
4466/* We have coalesced allocnos involved in copies. Coalesce allocnos
4467 further in order to share the same memory stack slot. Allocnos
4468 representing sets of allocnos coalesced before the call are given
4469 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
4470 some allocnos were coalesced in the function. */
4471static bool
4472coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
4473{
4474 int i, j, n, last_coalesced_allocno_num;
4475 ira_allocno_t allocno, a;
4476 bool merged_p = false;
4477 bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
4478
4479 slot_coalesced_allocnos_live_ranges
4480 = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
4481 memset (slot_coalesced_allocnos_live_ranges, 0,
4482 sizeof (live_range_t) * ira_allocnos_num);
4483 last_coalesced_allocno_num = 0;
4484 /* Coalesce non-conflicting spilled allocnos preferring most
4485 frequently used. */
4486 for (i = 0; i < num; i++)
4487 {
4488 allocno = spilled_coalesced_allocnos[i];
4489 if (ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->first != allocno
4490 || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno)((allocno)->regno))
4491 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)((allocno)->regno)))
4492 continue;
4493 for (j = 0; j < i; j++)
4494 {
4495 a = spilled_coalesced_allocnos[j];
4496 n = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->temp;
4497 if (ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->first == a
4498 && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a)((a)->regno))
4499 && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a)((a)->regno))
4500 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
4501 break;
4502 }
4503 if (j >= i)
4504 {
4505 /* No coalescing: set up number for coalesced allocnos
4506 represented by ALLOCNO. */
4507 ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->temp = last_coalesced_allocno_num++;
4508 setup_slot_coalesced_allocno_live_ranges (allocno);
4509 }
4510 else
4511 {
4512 allocno_coalesced_p = true;
4513 merged_p = true;
4514 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4515 fprintf (ira_dump_file,
4516 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
4517 ALLOCNO_NUM (allocno)((allocno)->num), ALLOCNO_REGNO (allocno)((allocno)->regno),
4518 ALLOCNO_NUM (a)((a)->num), ALLOCNO_REGNO (a)((a)->regno));
4519 ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->temp
4520 = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->temp;
4521 setup_slot_coalesced_allocno_live_ranges (allocno);
4522 merge_allocnos (a, allocno);
4523 ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a)((void)(!(((coalesce_data_t) ((a)->add_data))->first ==
a) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4523, __FUNCTION__), 0 : 0))
;
4524 }
4525 }
4526 for (i = 0; i < ira_allocnos_num; i++)
4527 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
4528 ira_free (slot_coalesced_allocnos_live_ranges);
4529 return merged_p;
4530}
4531
4532/* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
4533 the subsequent assignment of stack slots to them in the reload
4534 pass. To do this we coalesce spilled allocnos first to decrease
4535 the number of memory-memory move insns. This function is called
4536 by reload. */
4537void
4538ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
4539 machine_mode *reg_max_ref_mode)
4540{
4541 int max_regno = max_reg_num ();
4542 int i, regno, num, slot_num;
4543 ira_allocno_t allocno, a;
4544 ira_allocno_iterator ai;
4545 ira_allocno_t *spilled_coalesced_allocnos;
4546
4547 ira_assert (! ira_use_lra_p)((void)(!(! ira_use_lra_p) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4547, __FUNCTION__), 0 : 0))
;
4548
4549 /* Set up which allocnos can be coalesced. */
4550 coloring_allocno_bitmap = ira_allocate_bitmap ();
4551 for (i = 0; i < n; i++)
4552 {
4553 regno = pseudo_regnos[i];
4554 allocno = ira_regno_allocno_map[regno];
4555 if (allocno != NULLnullptr)
4556 bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno)((allocno)->num));
4557 }
4558 allocno_coalesced_p = false;
4559 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
4560 allocno_coalesce_data
4561 = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
4562 * ira_allocnos_num);
4563 /* Initialize coalesce data for allocnos. */
4564 FOR_EACH_ALLOCNO (a, ai)for (ira_allocno_iter_init (&(ai)); ira_allocno_iter_cond
(&(ai), &(a));)
4565 {
4566 ALLOCNO_ADD_DATA (a)((a)->add_data) = allocno_coalesce_data + ALLOCNO_NUM (a)((a)->num);
4567 ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->first = a;
4568 ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next = a;
4569 }
4570 coalesce_allocnos ();
4571 ira_free_bitmap (coloring_allocno_bitmap);
4572 regno_coalesced_allocno_cost
4573 = (int *) ira_allocate (max_regno * sizeof (int));
4574 regno_coalesced_allocno_num
4575 = (int *) ira_allocate (max_regno * sizeof (int));
4576 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
4577 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4578 /* Sort regnos according to the frequencies of the corresponding
4579 coalesced allocno sets. */
4580 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare)gcc_qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare
)
;
4581 spilled_coalesced_allocnos
4582 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
4583 * sizeof (ira_allocno_t));
4584 /* Collect allocnos representing the spilled coalesced allocno
4585 sets. */
4586 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4587 spilled_coalesced_allocnos);
4588 if (flag_ira_share_spill_slotsglobal_options.x_flag_ira_share_spill_slots
4589 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
4590 {
4591 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4592 qsort (pseudo_regnos, n, sizeof (int),gcc_qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare
)
4593 coalesced_pseudo_reg_freq_compare)gcc_qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare
)
;
4594 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4595 spilled_coalesced_allocnos);
4596 }
4597 ira_free_bitmap (processed_coalesced_allocno_bitmap);
4598 allocno_coalesced_p = false;
4599 /* Assign stack slot numbers to spilled allocno sets, using smaller
4600 numbers for the most frequently used coalesced allocnos. -1 is
4601 reserved for dynamic search of stack slots for pseudos spilled by
4602 reload. */
4603 slot_num = 1;
4604 for (i = 0; i < num; i++)
4605 {
4606 allocno = spilled_coalesced_allocnos[i];
4607 if (ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->first != allocno
4608 || ALLOCNO_HARD_REGNO (allocno)((allocno)->hard_regno) >= 0
4609 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)((allocno)->regno)))
4610 continue;
4611 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4612 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
4613 slot_num++;
4614 for (a = ALLOCNO_COALESCE_DATA (allocno)((coalesce_data_t) ((allocno)->add_data))->next;;
4615 a = ALLOCNO_COALESCE_DATA (a)((coalesce_data_t) ((a)->add_data))->next)
4616 {
4617 ira_assert (ALLOCNO_HARD_REGNO (a) < 0)((void)(!(((a)->hard_regno) < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4617, __FUNCTION__), 0 : 0))
;
4618 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = -slot_num;
4619 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4620 {
4621 machine_mode mode = wider_subreg_mode
4622 (PSEUDO_REGNO_MODE (ALLOCNO_REGNO (a))((machine_mode) (regno_reg_rtx[((a)->regno)])->mode),
4623 reg_max_ref_mode[ALLOCNO_REGNO (a)((a)->regno)]);
4624 fprintf (ira_dump_file, " a%dr%d(%d,",
4625 ALLOCNO_NUM (a)((a)->num), ALLOCNO_REGNO (a)((a)->regno), ALLOCNO_FREQ (a)((a)->freq));
4626 print_dec (GET_MODE_SIZE (mode), ira_dump_file, SIGNED);
4627 fprintf (ira_dump_file, ")\n");
4628 }
4629
4630 if (a == allocno)
4631 break;
4632 }
4633 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4634 fprintf (ira_dump_file, "\n");
4635 }
4636 ira_spilled_reg_stack_slots_num = slot_num - 1;
4637 ira_free (spilled_coalesced_allocnos);
4638 /* Sort regnos according to the slot numbers. */
4639 regno_max_ref_mode = reg_max_ref_mode;
4640 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare)gcc_qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare
)
;
4641 FOR_EACH_ALLOCNO (a, ai)for (ira_allocno_iter_init (&(ai)); ira_allocno_iter_cond
(&(ai), &(a));)
4642 ALLOCNO_ADD_DATA (a)((a)->add_data) = NULLnullptr;
4643 ira_free (allocno_coalesce_data);
4644 ira_free (regno_coalesced_allocno_num);
4645 ira_free (regno_coalesced_allocno_cost);
4646}
4647
4648
4649
4650/* This page contains code used by the reload pass to improve the
4651 final code. */
4652
4653/* The function is called from reload to mark changes in the
4654 allocation of REGNO made by reload. Remember that reg_renumber
4655 reflects the result of the change. */
4656void
4657ira_mark_allocation_change (int regno)
4658{
4659 ira_allocno_t a = ira_regno_allocno_map[regno];
4660 int old_hard_regno, hard_regno, cost;
4661 enum reg_class aclass = ALLOCNO_CLASS (a)((a)->aclass);
4662
4663 ira_assert (a != NULL)((void)(!(a != nullptr) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4663, __FUNCTION__), 0 : 0))
;
4664 hard_regno = reg_renumber[regno];
4665 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)((a)->hard_regno)) == hard_regno)
4666 return;
4667 if (old_hard_regno < 0)
4668 cost = -ALLOCNO_MEMORY_COST (a)((a)->memory_cost);
4669 else
4670 {
4671 ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0)((void)(!((this_target_ira_int->x_ira_class_hard_reg_index
)[aclass][old_hard_regno] >= 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4671, __FUNCTION__), 0 : 0))
;
4672 cost = -(ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs) == NULLnullptr
4673 ? ALLOCNO_CLASS_COST (a)((a)->class_cost)
4674 : ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs)
4675 [ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[aclass][old_hard_regno]]);
4676 update_costs_from_copies (a, false, false);
4677 }
4678 ira_overall_cost -= cost;
4679 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = hard_regno;
4680 if (hard_regno < 0)
4681 {
4682 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = -1;
4683 cost += ALLOCNO_MEMORY_COST (a)((a)->memory_cost);
4684 }
4685 else if (ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[aclass][hard_regno] >= 0)
4686 {
4687 cost += (ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs) == NULLnullptr
4688 ? ALLOCNO_CLASS_COST (a)((a)->class_cost)
4689 : ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs)
4690 [ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)[aclass][hard_regno]]);
4691 update_costs_from_copies (a, true, false);
4692 }
4693 else
4694 /* Reload changed class of the allocno. */
4695 cost = 0;
4696 ira_overall_cost += cost;
4697}
4698
4699/* This function is called when reload deletes a memory-memory move.
4700 In this case we mark that the allocation of the corresponding
4701 allocnos should not be changed in the future. Otherwise we risk
4702 getting wrong code. */
4703void
4704ira_mark_memory_move_deletion (int dst_regno, int src_regno)
4705{
4706 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
4707 ira_allocno_t src = ira_regno_allocno_map[src_regno];
4708
4709 ira_assert (dst != NULL && src != NULL((void)(!(dst != nullptr && src != nullptr &&
((dst)->hard_regno) < 0 && ((src)->hard_regno
) < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4711, __FUNCTION__), 0 : 0))
4710 && ALLOCNO_HARD_REGNO (dst) < 0((void)(!(dst != nullptr && src != nullptr &&
((dst)->hard_regno) < 0 && ((src)->hard_regno
) < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4711, __FUNCTION__), 0 : 0))
4711 && ALLOCNO_HARD_REGNO (src) < 0)((void)(!(dst != nullptr && src != nullptr &&
((dst)->hard_regno) < 0 && ((src)->hard_regno
) < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4711, __FUNCTION__), 0 : 0))
;
4712 ALLOCNO_DONT_REASSIGN_P (dst)((dst)->dont_reassign_p) = true;
4713 ALLOCNO_DONT_REASSIGN_P (src)((src)->dont_reassign_p) = true;
4714}
4715
4716/* Try to assign a hard register (except for FORBIDDEN_REGS) to
4717 allocno A and return TRUE in the case of success. */
4718static bool
4719allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
4720{
4721 int hard_regno;
4722 enum reg_class aclass;
4723 int regno = ALLOCNO_REGNO (a)((a)->regno);
4724 HARD_REG_SET saved[2];
4725 int i, n;
4726
4727 n = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
4728 for (i = 0; i < n; i++)
4729 {
4730 ira_object_t obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]);
4731 saved[i] = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)((obj)->total_conflict_hard_regs);
4732 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)((obj)->total_conflict_hard_regs) |= forbidden_regs;
4733 if (! flag_caller_savesglobal_options.x_flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a)((a)->calls_crossed_num) != 0)
4734 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)((obj)->total_conflict_hard_regs) |= ira_need_caller_save_regs (a);
4735 }
4736 ALLOCNO_ASSIGNED_P (a)((a)->assigned_p) = false;
4737 aclass = ALLOCNO_CLASS (a)((a)->aclass);
4738 update_curr_costs (a);
4739 assign_hard_reg (a, true);
4740 hard_regno = ALLOCNO_HARD_REGNO (a)((a)->hard_regno);
4741 reg_renumber[regno] = hard_regno;
4742 if (hard_regno < 0)
4743 ALLOCNO_HARD_REGNO (a)((a)->hard_regno) = -1;
4744 else
4745 {
4746 ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0)((void)(!((this_target_ira_int->x_ira_class_hard_reg_index
)[aclass][hard_regno] >= 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4746, __FUNCTION__), 0 : 0))
;
4747 ira_overall_cost
4748 -= (ALLOCNO_MEMORY_COST (a)((a)->memory_cost)
4749 - (ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs) == NULLnullptr
4750 ? ALLOCNO_CLASS_COST (a)((a)->class_cost)
4751 : ALLOCNO_HARD_REG_COSTS (a)((a)->hard_reg_costs)[ira_class_hard_reg_index(this_target_ira_int->x_ira_class_hard_reg_index)
4752 [aclass][hard_regno]]));
4753 if (ira_need_caller_save_p (a, hard_regno))
4754 {
4755 ira_assert (flag_caller_saves)((void)(!(global_options.x_flag_caller_saves) ? fancy_abort (
"/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4755, __FUNCTION__), 0 : 0))
;
4756 caller_save_needed = 1;
4757 }
4758 }
4759
4760 /* If we found a hard register, modify the RTL for the pseudo
4761 register to show the hard register, and mark the pseudo register
4762 live. */
4763 if (reg_renumber[regno] >= 0)
4764 {
4765 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4766 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
4767 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno])(df_ref_change_reg_with_loc (regno_reg_rtx[regno], reg_renumber
[regno]))
;
4768 mark_home_live (regno);
4769 }
4770 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4771 fprintf (ira_dump_file, "\n");
4772 for (i = 0; i < n; i++)
4773 {
4774 ira_object_t obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]);
4775 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)((obj)->total_conflict_hard_regs) = saved[i];
4776 }
4777 return reg_renumber[regno] >= 0;
4778}
4779
4780/* Sort pseudos according to their usage frequencies (putting the
4781 most frequently used ones first). */
4782static int
4783pseudo_reg_compare (const void *v1p, const void *v2p)
4784{
4785 int regno1 = *(const int *) v1p;
4786 int regno2 = *(const int *) v2p;
4787 int diff;
4788
4789 if ((diff = REG_FREQ (regno2)(reg_info_p[regno2].freq) - REG_FREQ (regno1)(reg_info_p[regno1].freq)) != 0)
4790 return diff;
4791 return regno1 - regno2;
4792}
4793
4794/* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
4795 NUM of them) or spilled pseudos conflicting with pseudos in
4796 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
4797 allocation has been changed. The function doesn't use
4798 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
4799 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
4800 is called by the reload pass at the end of each reload
4801 iteration. */
4802bool
4803ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
4804 HARD_REG_SET bad_spill_regs,
4805 HARD_REG_SET *pseudo_forbidden_regs,
4806 HARD_REG_SET *pseudo_previous_regs,
4807 bitmap spilled)
4808{
4809 int i, n, regno;
4810 bool changed_p;
4811 ira_allocno_t a;
4812 HARD_REG_SET forbidden_regs;
4813 bitmap temp = BITMAP_ALLOCbitmap_alloc (NULLnullptr);
4814
4815 /* Add pseudos which conflict with pseudos already in
4816 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
4817 to allocating in two steps as some of the conflicts might have
4818 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
4819 for (i = 0; i < num; i++)
4820 bitmap_set_bit (temp, spilled_pseudo_regs[i]);
4821
4822 for (i = 0, n = num; i < n; i++)
4823 {
4824 int nr, j;
4825 int regno = spilled_pseudo_regs[i];
4826 bitmap_set_bit (temp, regno);
4827
4828 a = ira_regno_allocno_map[regno];
4829 nr = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects);
4830 for (j = 0; j < nr; j++)
4831 {
4832 ira_object_t conflict_obj;
4833 ira_object_t obj = ALLOCNO_OBJECT (a, j)((a)->objects[j]);
4834 ira_object_conflict_iterator oci;
4835
4836 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)for (ira_object_conflict_iter_init (&(oci), (obj)); ira_object_conflict_iter_cond
(&(oci), &(conflict_obj));)
4837 {
4838 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj)((conflict_obj)->allocno);
4839 if (ALLOCNO_HARD_REGNO (conflict_a)((conflict_a)->hard_regno) < 0
4840 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)((conflict_a)->dont_reassign_p)
4841 && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)((conflict_a)->regno)))
4842 {
4843 spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a)((conflict_a)->regno);
4844 /* ?!? This seems wrong. */
4845 bitmap_set_bit (consideration_allocno_bitmap,
4846 ALLOCNO_NUM (conflict_a)((conflict_a)->num));
4847 }
4848 }
4849 }
4850 }
4851
4852 if (num > 1)
4853 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare)gcc_qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare
)
;
4854 changed_p = false;
4855 /* Try to assign hard registers to pseudos from
4856 SPILLED_PSEUDO_REGS. */
4857 for (i = 0; i < num; i++)
4858 {
4859 regno = spilled_pseudo_regs[i];
4860 forbidden_regs = (bad_spill_regs
4861 | pseudo_forbidden_regs[regno]
4862 | pseudo_previous_regs[regno]);
4863 gcc_assert (reg_renumber[regno] < 0)((void)(!(reg_renumber[regno] < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4863, __FUNCTION__), 0 : 0))
;
4864 a = ira_regno_allocno_map[regno];
4865 ira_mark_allocation_change (regno);
4866 ira_assert (reg_renumber[regno] < 0)((void)(!(reg_renumber[regno] < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4866, __FUNCTION__), 0 : 0))
;
4867 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULLnullptr)
4868 fprintf (ira_dump_file,
4869 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a)((a)->num),
4870 ALLOCNO_MEMORY_COST (a)((a)->memory_cost)
4871 - ALLOCNO_CLASS_COST (a)((a)->class_cost));
4872 allocno_reload_assign (a, forbidden_regs);
4873 if (reg_renumber[regno] >= 0)
4874 {
4875 CLEAR_REGNO_REG_SET (spilled, regno)bitmap_clear_bit (spilled, regno);
4876 changed_p = true;
4877 }
4878 }
4879 BITMAP_FREE (temp)((void) (bitmap_obstack_free ((bitmap) temp), (temp) = (bitmap
) nullptr))
;
4880 return changed_p;
4881}
4882
4883/* The function is called by reload and returns an already allocated
4884 stack slot (if any) for REGNO with the given INHERENT_SIZE and
4885 TOTAL_SIZE. If no slot which can be used for REGNO is found, the
4886 function returns NULL. */
4887rtx
4888ira_reuse_stack_slot (int regno, poly_uint64 inherent_size,
4889 poly_uint64 total_size)
4890{
4891 unsigned int i;
4892 int slot_num, best_slot_num;
4893 int cost, best_cost;
4894 ira_copy_t cp, next_cp;
4895 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
4896 rtx x;
4897 bitmap_iterator bi;
4898 class ira_spilled_reg_stack_slot *slot = NULLnullptr;
4899
4900 ira_assert (! ira_use_lra_p)((void)(!(! ira_use_lra_p) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4900, __FUNCTION__), 0 : 0))
;
4901
4902 ira_assert (known_eq (inherent_size, PSEUDO_REGNO_BYTES (regno))((void)(!((!maybe_ne (inherent_size, GET_MODE_SIZE (((machine_mode
) (regno_reg_rtx[regno])->mode)))) && (!maybe_lt (
total_size, inherent_size)) && ((allocno)->hard_regno
) < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4904, __FUNCTION__), 0 : 0))
4903 && known_le (inherent_size, total_size)((void)(!((!maybe_ne (inherent_size, GET_MODE_SIZE (((machine_mode
) (regno_reg_rtx[regno])->mode)))) && (!maybe_lt (
total_size, inherent_size)) && ((allocno)->hard_regno
) < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4904, __FUNCTION__), 0 : 0))
4904 && ALLOCNO_HARD_REGNO (allocno) < 0)((void)(!((!maybe_ne (inherent_size, GET_MODE_SIZE (((machine_mode
) (regno_reg_rtx[regno])->mode)))) && (!maybe_lt (
total_size, inherent_size)) && ((allocno)->hard_regno
) < 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4904, __FUNCTION__), 0 : 0))
;
4905 if (! flag_ira_share_spill_slotsglobal_options.x_flag_ira_share_spill_slots)
4906 return NULL_RTX(rtx) 0;
4907 slot_num = -ALLOCNO_HARD_REGNO (allocno)((allocno)->hard_regno) - 2;
4908 if (slot_num != -1)
4909 {
4910 slot = &ira_spilled_reg_stack_slots[slot_num];
4911 x = slot->mem;
4912 }
4913 else
4914 {
4915 best_cost = best_slot_num = -1;
4916 x = NULL_RTX(rtx) 0;
4917 /* It means that the pseudo was spilled in the reload pass, try
4918 to reuse a slot. */
4919 for (slot_num = 0;
4920 slot_num < ira_spilled_reg_stack_slots_num;
4921 slot_num++)
4922 {
4923 slot = &ira_spilled_reg_stack_slots[slot_num];
4924 if (slot->mem == NULL_RTX(rtx) 0)
4925 continue;
4926 if (maybe_lt (slot->width, total_size)
4927 || maybe_lt (GET_MODE_SIZE (GET_MODE (slot->mem)((machine_mode) (slot->mem)->mode)), inherent_size))
4928 continue;
4929
4930 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,for (bmp_iter_set_init (&(bi), (&slot->spilled_regs
), (76), &(i)); bmp_iter_set (&(bi), &(i)); bmp_iter_next
(&(bi), &(i)))
4931 FIRST_PSEUDO_REGISTER, i, bi)for (bmp_iter_set_init (&(bi), (&slot->spilled_regs
), (76), &(i)); bmp_iter_set (&(bi), &(i)); bmp_iter_next
(&(bi), &(i)))
4932 {
4933 another_allocno = ira_regno_allocno_map[i];
4934 if (allocnos_conflict_by_live_ranges_p (allocno,
4935 another_allocno))
4936 goto cont;
4937 }
4938 for (cost = 0, cp = ALLOCNO_COPIES (allocno)((allocno)->allocno_copies);
4939 cp != NULLnullptr;
4940 cp = next_cp)
4941 {
4942 if (cp->first == allocno)
4943 {
4944 next_cp = cp->next_first_allocno_copy;
4945 another_allocno = cp->second;
4946 }
4947 else if (cp->second == allocno)
4948 {
4949 next_cp = cp->next_second_allocno_copy;
4950 another_allocno = cp->first;
4951 }
4952 else
4953 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4953, __FUNCTION__))
;
4954 if (cp->insn == NULL_RTX(rtx) 0)
4955 continue;
4956 if (bitmap_bit_p (&slot->spilled_regs,
4957 ALLOCNO_REGNO (another_allocno)((another_allocno)->regno)))
4958 cost += cp->freq;
4959 }
4960 if (cost > best_cost)
4961 {
4962 best_cost = cost;
4963 best_slot_num = slot_num;
4964 }
4965 cont:
4966 ;
4967 }
4968 if (best_cost >= 0)
4969 {
4970 slot_num = best_slot_num;
4971 slot = &ira_spilled_reg_stack_slots[slot_num];
4972 SET_REGNO_REG_SET (&slot->spilled_regs, regno)bitmap_set_bit (&slot->spilled_regs, regno);
4973 x = slot->mem;
4974 ALLOCNO_HARD_REGNO (allocno)((allocno)->hard_regno) = -slot_num - 2;
4975 }
4976 }
4977 if (x != NULL_RTX(rtx) 0)
4978 {
4979 ira_assert (known_ge (slot->width, total_size))((void)(!((!maybe_lt (slot->width, total_size))) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4979, __FUNCTION__), 0 : 0))
;
4980#ifdef ENABLE_IRA_CHECKING
4981 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,for (bmp_iter_set_init (&(bi), (&slot->spilled_regs
), (76), &(i)); bmp_iter_set (&(bi), &(i)); bmp_iter_next
(&(bi), &(i)))
4982 FIRST_PSEUDO_REGISTER, i, bi)for (bmp_iter_set_init (&(bi), (&slot->spilled_regs
), (76), &(i)); bmp_iter_set (&(bi), &(i)); bmp_iter_next
(&(bi), &(i)))
4983 {
4984 ira_assert (! conflict_by_live_ranges_p (regno, i))((void)(!(! conflict_by_live_ranges_p (regno, i)) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/ira-color.cc"
, 4984, __FUNCTION__), 0 : 0))
;
4985 }
4986#endif
4987 SET_REGNO_REG_SET (&slot->spilled_regs, regno)bitmap_set_bit (&slot->spilled_regs, regno);
4988 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4989 {
4990 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
4991 regno, REG_FREQ (regno), slot_num);
4992 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4993 FIRST_PSEUDO_REGISTER, i, bi)
4994 {
4995 if ((unsigned) regno != i)
4996 fprintf (ira_dump_file, " %d", i);
4997 }
4998 fprintf (ira_dump_file, "\n");
4999 }
5000 }
5001 return x;
5002}
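
The slot-reuse loop above prefers, among the stack slots that are wide enough and whose current occupants do not conflict with the spilled pseudo by live ranges, the slot whose occupants are copy-connected to the pseudo with the highest total copy frequency, so related values end up sharing memory. A minimal standalone sketch of that preference, with simplified stand-in types rather than GCC's allocno/copy structures:

// Standalone illustration of the slot-reuse preference used above.
// Types, data, and helper names are simplified stand-ins, not GCC's.
#include <cstdio>
#include <vector>

struct copy_edge { int other_regno; int freq; }; // copy between two pseudos
struct spill_slot { std::vector<int> regnos; int width; };

// Hypothetical conflict test: in this sketch nothing ever conflicts.
static bool conflicts_p (int, int) { return false; }

// Return index of the best slot for REGNO, or -1 to force a fresh slot.
static int
choose_slot (int regno, int size,
             const std::vector<spill_slot> &slots,
             const std::vector<copy_edge> &copies)
{
  int best_cost = -1, best_slot = -1;
  for (size_t s = 0; s < slots.size (); s++)
    {
      if (slots[s].width < size)
        continue;
      bool conflict = false;
      for (int r : slots[s].regnos)
        if (conflicts_p (regno, r))
          { conflict = true; break; }
      if (conflict)
        continue;
      // Prefer the slot already holding copy-related pseudos.
      int cost = 0;
      for (const copy_edge &cp : copies)
        for (int r : slots[s].regnos)
          if (cp.other_regno == r)
            cost += cp.freq;
      if (cost > best_cost)
        { best_cost = cost; best_slot = (int) s; }
    }
  return best_cost >= 0 ? best_slot : -1;
}

int
main ()
{
  std::vector<spill_slot> slots = { { { 7 }, 8 }, { { 9 }, 8 } };
  std::vector<copy_edge> copies = { { 9, 120 }, { 7, 30 } };
  printf ("chosen slot: %d\n", choose_slot (11, 8, slots, copies)); // prints 1
  return 0;
}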
5003
5004/* This is called by reload every time a new stack slot X with
5005 TOTAL_SIZE was allocated for REGNO. We store this info for
5006 subsequent ira_reuse_stack_slot calls. */
5007void
5008ira_mark_new_stack_slot (rtx x, int regno, poly_uint64 total_size)
5009{
5010 class ira_spilled_reg_stack_slot *slot;
5011 int slot_num;
5012 ira_allocno_t allocno;
5013
5014 ira_assert (! ira_use_lra_p);
5015
5016 ira_assert (known_le (PSEUDO_REGNO_BYTES (regno), total_size));
5017 allocno = ira_regno_allocno_map[regno];
5018 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
5019 if (slot_num == -1)
5020 {
5021 slot_num = ira_spilled_reg_stack_slots_num++;
5022 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
5023 }
5024 slot = &ira_spilled_reg_stack_slots[slot_num];
5025 INIT_REG_SET (&slot->spilled_regs);
5026 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
5027 slot->mem = x;
5028 slot->width = total_size;
5029 if (internal_flag_ira_verbose > 3 && ira_dump_file)
5030 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
5031 regno, REG_FREQ (regno), slot_num);
5032}
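
Note how the allocno's hard_regno field does double duty for spilled pseudos: -1 means no assignment, while values of -2 and below encode a stack-slot index as -slot_num - 2 (the same encoding is decoded at lines 4907 and 5018 above). A tiny sketch of the round trip, using hypothetical helper names rather than GCC's macros:

// Round-trip of the slot-number encoding stored in the hard_regno field:
// -1 means "unassigned", while -slot_num - 2 encodes a spill slot.
#include <cassert>

static int encode_slot (int slot_num) { return -slot_num - 2; }
static int decode_slot (int hard_regno) { return -hard_regno - 2; }

int
main ()
{
  for (int slot = 0; slot < 16; slot++)
    {
      int enc = encode_slot (slot);
      assert (enc <= -2);                // never collides with -1 or a real reg
      assert (decode_slot (enc) == slot);
    }
  assert (decode_slot (-1) == -1);       // "no slot" decodes back to -1
  return 0;
}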
5033
5034
5035/* Return spill cost for pseudo-registers whose numbers are in array
5036 REGNOS (with a negative number as an end marker) for reload with
5037 given IN and OUT for INSN. Return also the number of points (through
5038 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
5039 the register pressure is high, number of references of the
5040 pseudo-registers (through NREFS), the number of pseudo-registers
5041 whose allocated register wouldn't need saving in the prologue
5042 (through CALL_USED_COUNT), and the first hard regno occupied by the
5043 pseudo-registers (through FIRST_HARD_REGNO). */
5044static int
5045calculate_spill_cost (int *regnos, rtx in, rtx out, rtx_insn *insn,
5046 int *excess_pressure_live_length,
5047 int *nrefs, int *call_used_count, int *first_hard_regno)
5048{
5049 int i, cost, regno, hard_regno, count, saved_cost;
5050 bool in_p, out_p;
5051 int length;
5052 ira_allocno_t a;
5053
5054 *nrefs = 0;
5055 for (length = count = cost = i = 0;; i++)
5056 {
5057 regno = regnos[i];
5058 if (regno < 0)
5059 break;
5060 *nrefs += REG_N_REFS (regno);
5061 hard_regno = reg_renumber[regno];
5062 ira_assert (hard_regno >= 0);
5063 a = ira_regno_allocno_map[regno];
5064 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
5065 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
5066 if (in_hard_reg_set_p (crtl->abi->full_reg_clobbers (),
5067 ALLOCNO_MODE (a), hard_regno))
5068 count++;
5069 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
5070 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
5071 if ((in_p || out_p)
5072 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
5073 {
5074 saved_cost = 0;
5075 if (in_p)
5076 saved_cost += ira_memory_move_cost
5077 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
5078 if (out_p)
5079 saved_cost
5080 += ira_memory_move_cost
5081 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
5082 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
5083 }
5084 }
5085 *excess_pressure_live_length = length;
5086 *call_used_count = count;
5087 hard_regno = -1;
5088 if (regnos[0] >= 0)
5089 {
5090 hard_regno = reg_renumber[regnos[0]];
5091 }
5092 *first_hard_regno = hard_regno;
5093 return cost;
5094}
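
The cost computed above is, per pseudo, its memory cost minus its best register-class cost, reduced by the frequency-weighted memory-move cost that is saved when the reload register dies in INSN. A standalone sketch of that arithmetic with made-up numbers (the struct and its fields are illustrative stand-ins, not GCC's data structures):

// Standalone sketch of the spill-cost accumulation used above.
#include <cstdio>

struct pseudo_info
{
  int memory_cost;    // cost of keeping the pseudo in memory
  int class_cost;     // cost of keeping it in its preferred register class
  bool dies_in_insn;  // reload register dies here, so a move can be saved
  int mem_move_cost;  // per-access memory move cost
  int insn_freq;      // execution frequency of the reload insn
};

static int
spill_cost (const pseudo_info *ps, int n)
{
  int cost = 0;
  for (int i = 0; i < n; i++)
    {
      cost += ps[i].memory_cost - ps[i].class_cost;
      if (ps[i].dies_in_insn)
        cost -= ps[i].insn_freq * ps[i].mem_move_cost;
    }
  return cost;
}

int
main ()
{
  pseudo_info ps[] = { { 40, 10, false, 4, 100 },
                       { 50, 20, true, 4, 100 } };
  printf ("cost = %d\n", spill_cost (ps, 2)); // 30 + (30 - 400) = -340
  return 0;
}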
5095
5096/* Return TRUE if spilling pseudo-registers whose numbers are in array
5097 REGNOS is better than spilling pseudo-registers with numbers in
5098 OTHER_REGNOS for reload with given IN and OUT for INSN. The
5099 function is used by the reload pass to make better register spilling
5100 decisions. */
5101bool
5102ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
5103 rtx in, rtx out, rtx_insn *insn)
5104{
5105 int cost, other_cost;
5106 int length, other_length;
5107 int nrefs, other_nrefs;
5108 int call_used_count, other_call_used_count;
5109 int hard_regno, other_hard_regno;
5110
5111 cost = calculate_spill_cost (regnos, in, out, insn,
5112 &length, &nrefs, &call_used_count, &hard_regno);
5113 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
5114 &other_length, &other_nrefs,
5115 &other_call_used_count,
5116 &other_hard_regno);
5117 if (nrefs == 0 && other_nrefs != 0)
5118 return true;
5119 if (nrefs != 0 && other_nrefs == 0)
5120 return false;
5121 if (cost != other_cost)
5122 return cost < other_cost;
5123 if (length != other_length)
5124 return length > other_length;
5125#ifdef REG_ALLOC_ORDER
5126 if (hard_regno >= 0 && other_hard_regno >= 0)
5127 return (inv_reg_alloc_order[hard_regno]
5128 < inv_reg_alloc_order[other_hard_regno]);
5129#else
5130 if (call_used_count != other_call_used_count)
5131 return call_used_count > other_call_used_count;
5132#endif
5133 return false;
5134}
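
The comparison above is a strict tie-break chain: a set of completely unreferenced pseudos is always preferred for spilling, then lower spill cost, then a longer high-pressure live length, and finally either allocation order (when REG_ALLOC_ORDER is defined, as on this x86-64 build) or the call-clobbered count. A compressed illustration of the same chain, showing the #else branch for the last step (the struct is a simplified stand-in, not GCC's):

// Illustration of the tie-break chain used by the comparison above.
struct spill_stats
{
  int nrefs;            // references to the pseudos
  int cost;             // spill cost
  int length;           // points where register pressure is high
  int call_used_count;  // regs not needing a prologue save
};

static bool
better_spill_p (const spill_stats &a, const spill_stats &b)
{
  if (a.nrefs == 0 && b.nrefs != 0) return true;    // unreferenced: free to spill
  if (a.nrefs != 0 && b.nrefs == 0) return false;
  if (a.cost != b.cost) return a.cost < b.cost;     // cheaper spill wins
  if (a.length != b.length) return a.length > b.length; // relieves more pressure
  return a.call_used_count > b.call_used_count;     // frees more call-clobbered regs
}

int
main ()
{
  spill_stats a = { 3, 10, 7, 2 }, b = { 3, 10, 5, 2 };
  return better_spill_p (a, b) ? 0 : 1;  // a wins on longer high-pressure length
}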
5135
5136
5137
5138/* Allocate and initialize data necessary for assign_hard_reg. */
5139void
5140ira_initiate_assign (void)
5141{
5142 sorted_allocnos
5143 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
5144 * ira_allocnos_num);
5145 consideration_allocno_bitmap = ira_allocate_bitmap ();
5146 initiate_cost_update ();
5147 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
5148 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
5149 * sizeof (ira_copy_t));
5150}
5151
5152/* Deallocate data used by assign_hard_reg. */
5153void
5154ira_finish_assign (void)
5155{
5156 ira_free (sorted_allocnos);
5157 ira_free_bitmap (consideration_allocno_bitmap);
5158 finish_cost_update ();
5159 ira_free (allocno_priorities);
5160 ira_free (sorted_copies);
5161}
5162
5163
5164
5165/* Entry function doing color-based register allocation. */
5166static void
5167color (void)
5168{
5169 allocno_stack_vec.create (ira_allocnos_num);
5170 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
5171 ira_initiate_assign ();
5172 do_coloring ();
5173 ira_finish_assign ();
5174 allocno_stack_vec.release ();
5175 move_spill_restore ();
5176}
5177
5178
5179
5180/* This page contains a simple register allocator without usage of
5181 allocno conflicts. This is used for fast allocation for -O0. */
5182
5183/* Do register allocation by not using allocno conflicts. It uses
5184 only allocno live ranges. The algorithm is close to Chow's
5185 priority coloring. */
5186static void
5187fast_allocation (void)
5188{
5189 int i, j, k, num, class_size, hard_regno, best_hard_regno, cost, min_cost;
5190 int *costs;
5191#ifdef STACK_REGS
5192 bool no_stack_reg_p;
5193#endif
5194 enum reg_class aclass;
5195 machine_mode mode;
5196 ira_allocno_t a;
5197 ira_allocno_iterator ai;
5198 live_range_t r;
5199 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
5200
5201 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
5202 * ira_allocnos_num);
5203 num = 0;
5204 FOR_EACH_ALLOCNO (a, ai)
5205 sorted_allocnos[num++] = a;
5206 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
5207 setup_allocno_priorities (sorted_allocnos, num);
5208 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
5209 * ira_max_point);
5210 for (i = 0; i < ira_max_point; i++)
5211 CLEAR_HARD_REG_SET (used_hard_regs[i]);
5212 qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
5213 allocno_priority_compare_func);
5214 for (i = 0; i < num; i++)
5215 {
5216 int nr, l;
5217
5218 a = sorted_allocnos[i];
5219 nr = ALLOCNO_NUM_OBJECTS (a);
5220 CLEAR_HARD_REG_SET (conflict_hard_regs);
5221 for (l = 0; l < nr; l++)
5222 {
5223 ira_object_t obj = ALLOCNO_OBJECT (a, l);
5224 conflict_hard_regs |= OBJECT_CONFLICT_HARD_REGS (obj);
5225 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
5226 for (j = r->start; j <= r->finish; j++)
5227 conflict_hard_regs |= used_hard_regs[j];
5228 }
5229 aclass = ALLOCNO_CLASS (a);
5230 ALLOCNO_ASSIGNED_P (a) = true;
5231 ALLOCNO_HARD_REGNO (a) = -1;
5232 if (hard_reg_set_subset_p (reg_class_contents[aclass],
5233 conflict_hard_regs))
5234 continue;
5235 mode = ALLOCNO_MODE (a);
5236#ifdef STACK_REGS
5237 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
5238#endif
5239 class_size = ira_class_hard_regs_num[aclass];
5240 costs = ALLOCNO_HARD_REG_COSTS (a);
5241 min_cost = INT_MAX;
5242 best_hard_regno = -1;
5243 for (j = 0; j < class_size; j++)
5244 {
5245 hard_regno = ira_class_hard_regs[aclass][j];
5246#ifdef STACK_REGS
5247 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
5248 && hard_regno <= LAST_STACK_REG)
5249 continue;
5250#endif
5251 if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
5252 || (TEST_HARD_REG_BIT
5253 (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
5254 continue;
5255 if (costs == NULL)
5256 {
5257 best_hard_regno = hard_regno;
5258 break;
5259 }
5260 cost = costs[j];
5261 if (min_cost > cost)
5262 {
5263 min_cost = cost;
5264 best_hard_regno = hard_regno;
5265 }
5266 }
5267 if (best_hard_regno < 0)
5268 continue;
5269 ALLOCNO_HARD_REGNO (a) = hard_regno = best_hard_regno;
5270 for (l = 0; l < nr; l++)
5271 {
5272 ira_object_t obj = ALLOCNO_OBJECT (a, l);
5273 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
5274 for (k = r->start; k <= r->finish; k++)
5275 used_hard_regs[k] |= ira_reg_mode_hard_regset[hard_regno][mode];
5276 }
5277 }
5278 ira_free (sorted_allocnos);
5279 ira_free (used_hard_regs);
5280 ira_free (allocno_priorities);
5281 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
5282 ira_print_disposition (ira_dump_file);
5283}
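
fast_allocation above is a priority-coloring sweep: allocnos are sorted by priority, the hard registers already in use at every program point of an allocno's live ranges are accumulated into a conflict set, and the best non-conflicting register of the class is taken and marked used over those points. A minimal standalone sketch of that idea with simplified stand-ins (bitmask register sets, no cost vectors, no stack-register handling; not GCC code):

// Standalone sketch of priority coloring over live ranges.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct range { int start, finish; };
struct allocno { int priority; std::vector<range> live; int hard_regno; };

static void
fast_allocate (std::vector<allocno> &as, int num_regs, int max_point)
{
  std::vector<uint32_t> used (max_point + 1, 0);   // register bitmask per point
  std::vector<allocno *> order;
  for (allocno &a : as)
    order.push_back (&a);
  std::sort (order.begin (), order.end (),
             [] (const allocno *x, const allocno *y)
             { return x->priority > y->priority; });
  for (allocno *a : order)
    {
      a->hard_regno = -1;
      uint32_t conflict = 0;
      for (const range &r : a->live)
        for (int p = r.start; p <= r.finish; p++)
          conflict |= used[p];
      for (int hr = 0; hr < num_regs; hr++)
        if (!(conflict & (1u << hr)))
          {
            a->hard_regno = hr;
            for (const range &r : a->live)
              for (int p = r.start; p <= r.finish; p++)
                used[p] |= 1u << hr;
            break;
          }
    }
}

int
main ()
{
  std::vector<allocno> as = { { 10, { { 0, 4 } }, 0 },
                              { 5, { { 2, 6 } }, 0 },
                              { 1, { { 5, 8 } }, 0 } };
  fast_allocate (as, 2, 8);
  for (const allocno &a : as)
    printf ("priority %d -> reg %d\n", a.priority, a.hard_regno);
  return 0;
}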
5284
5285
5286
5287/* Entry function doing coloring. */
5288void
5289ira_color (void)
5290{
5291 ira_allocno_t a;
5292 ira_allocno_iterator ai;
5293
5294 /* Setup updated costs. */
5295 FOR_EACH_ALLOCNO (a, ai)
5296 {
5297 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
5298 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
5299 }
5300 if (ira_conflicts_p)
5301 color ();
5302 else
5303 fast_allocation ();
5304}