Bug Summary

File: build/gcc/haifa-sched.cc
Warning: line 3240, column 52
The right operand of '!=' is a garbage value

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-suse-linux -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name haifa-sched.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -resource-dir /usr/lib64/clang/15.0.7 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/backward -internal-isystem /usr/lib64/clang/15.0.7/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../x86_64-suse-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2023-03-27-141847-20772-1/report-9gqhk4.plist -x c++ /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc
1/* Instruction scheduling pass.
2 Copyright (C) 1992-2023 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 3, or (at your option) any later
11version.
12
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
21
22/* Instruction scheduling pass. This file, along with sched-deps.cc,
23 contains the generic parts. The actual entry point for
24 the normal instruction scheduling pass is found in sched-rgn.cc.
25
26 We compute insn priorities based on data dependencies. Flow
27 analysis only creates a fraction of the data-dependencies we must
28 observe: namely, only those dependencies which the combiner can be
29 expected to use. For this pass, we must therefore create the
30 remaining dependencies we need to observe: register dependencies,
31 memory dependencies, dependencies to keep function calls in order,
32 and the dependence between a conditional branch and the setting of
33 condition codes are all dealt with here.
34
35 The scheduler first traverses the data flow graph, starting with
36 the last instruction, and proceeding to the first, assigning values
37 to insn_priority as it goes. This sorts the instructions
38 topologically by data dependence.
39
40 Once priorities have been established, we order the insns using
41 list scheduling. This works as follows: starting with a list of
42 all the ready insns, and sorted according to priority number, we
43 schedule the insn from the end of the list by placing its
44 predecessors in the list according to their priority order. We
45 consider this insn scheduled by setting the pointer to the "end" of
46 the list to point to the previous insn. When an insn has no
47 predecessors, we either queue it until sufficient time has elapsed
48 or add it to the ready list. As the instructions are scheduled or
49 when stalls are introduced, the queue advances and dumps insns into
50 the ready list. When all insns down to the lowest priority have
51 been scheduled, the critical path of the basic block has been made
52 as short as possible. The remaining insns are then scheduled in
53 remaining slots.
54
55 The following list shows the order in which we want to break ties
56 among insns in the ready list:
57
58 1. choose insn with the longest path to end of bb, ties
59 broken by
60 2. choose insn with least contribution to register pressure,
61 ties broken by
62 3. prefer in-block upon interblock motion, ties broken by
63 4. prefer useful upon speculative motion, ties broken by
64 5. choose insn with largest control flow probability, ties
65 broken by
66 6. choose insn with the least dependences upon the previously
67 scheduled insn, or finally
 68 7. choose the insn which has the most insns dependent on it.
69 8. choose insn with lowest UID.
70
71 Memory references complicate matters. Only if we can be certain
72 that memory references are not part of the data dependency graph
73 (via true, anti, or output dependence), can we move operations past
74 memory references. To first approximation, reads can be done
75 independently, while writes introduce dependencies. Better
76 approximations will yield fewer dependencies.
77
78 Before reload, an extended analysis of interblock data dependences
79 is required for interblock scheduling. This is performed in
80 compute_block_dependences ().
81
82 Dependencies set up by memory references are treated in exactly the
83 same way as other dependencies, by using insn backward dependences
84 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
85 INSN_FORW_DEPS for the purpose of forward list scheduling.
86
87 Having optimized the critical path, we may have also unduly
88 extended the lifetimes of some registers. If an operation requires
89 that constants be loaded into registers, it is certainly desirable
90 to load those constants as early as necessary, but no earlier.
91 I.e., it will not do to load up a bunch of registers at the
92 beginning of a basic block only to use them at the end, if they
93 could be loaded later, since this may result in excessive register
94 utilization.
95
96 Note that since branches are never in basic blocks, but only end
97 basic blocks, this pass will not move branches. But that is ok,
98 since we can use GNU's delayed branch scheduling pass to take care
99 of this case.
100
101 Also note that no further optimizations based on algebraic
102 identities are performed, so this pass would be a good one to
103 perform instruction splitting, such as breaking up a multiply
104 instruction into shifts and adds where that is profitable.
105
106 Given the memory aliasing analysis that this pass should perform,
107 it should be possible to remove redundant stores to memory, and to
108 load values from registers instead of hitting memory.
109
110 Before reload, speculative insns are moved only if a 'proof' exists
111 that no exception will be caused by this, and if no live registers
112 exist that inhibit the motion (live registers constraints are not
113 represented by data dependence edges).
114
115 This pass must update information that subsequent passes expect to
116 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
117 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
118
119 The information in the line number notes is carefully retained by
120 this pass. Notes that refer to the starting and ending of
121 exception regions are also carefully retained by this pass. All
122 other NOTE insns are grouped in their same relative order at the
123 beginning of basic blocks and regions that have been scheduled. */
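As a concrete illustration of the priority/list-scheduling idea described above, here is a minimal self-contained C++ sketch. All names and latencies in it are hypothetical; GCC's real scheduler works on RTL insns, the dependence lists built by sched-deps.cc, and the full tie-breaking chain listed above, of which only the path-length and lowest-UID tie-breaks are modeled here.

#include <algorithm>
#include <cstdio>
#include <vector>

struct toy_insn
{
  int uid;
  int latency;
  std::vector<int> succs;   /* forward data dependences */
  int priority = 0;         /* longest latency path to end of block */
  int unresolved_preds = 0; /* producers not yet scheduled */
};

/* Priorities are assigned from the last instruction backwards:
   priority = latency + max over dependent insns, i.e. a topological
   ordering by data dependence as the comment above describes.  */
static void
compute_priority (std::vector<toy_insn> &g, int i)
{
  if (g[i].priority)
    return;
  int best = 0;
  for (int s : g[i].succs)
    {
      compute_priority (g, s);
      best = std::max (best, g[s].priority);
    }
  g[i].priority = g[i].latency + best;
}

int
main ()
{
  /* Diamond-shaped dependence DAG: 0 -> {1, 2} -> 3.  */
  std::vector<toy_insn> g
    = {{0, 1, {1, 2}}, {1, 3, {3}}, {2, 1, {3}}, {3, 1, {}}};
  for (auto &n : g)
    for (int s : n.succs)
      g[s].unresolved_preds++;
  for (size_t i = 0; i < g.size (); i++)
    compute_priority (g, (int) i);

  std::vector<int> ready;
  for (auto &n : g)
    if (n.unresolved_preds == 0)
      ready.push_back (n.uid);
  while (!ready.empty ())
    {
      /* Highest priority wins; lowest UID is the last-resort tie-break.  */
      auto it = std::max_element (ready.begin (), ready.end (),
                                  [&] (int a, int b)
                                  {
                                    if (g[a].priority != g[b].priority)
                                      return g[a].priority < g[b].priority;
                                    return a > b;
                                  });
      int n = *it;
      ready.erase (it);
      std::printf ("schedule insn %d (priority %d)\n", n, g[n].priority);
      for (int s : g[n].succs)
        if (--g[s].unresolved_preds == 0)
          ready.push_back (s);
    }
}

Scheduling here is driven purely by dependence-count bookkeeping; the real pass additionally models issue_rate, the DFA pipeline state, and the queue of insns waiting for their ready cycle, all of which appear later in this file.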
124
125#include "config.h"
126#include "system.h"
127#include "coretypes.h"
128#include "backend.h"
129#include "target.h"
130#include "rtl.h"
131#include "cfghooks.h"
132#include "df.h"
133#include "memmodel.h"
134#include "tm_p.h"
135#include "insn-config.h"
136#include "regs.h"
137#include "ira.h"
138#include "recog.h"
139#include "insn-attr.h"
140#include "cfgrtl.h"
141#include "cfgbuild.h"
142#include "sched-int.h"
143#include "common/common-target.h"
144#include "dbgcnt.h"
145#include "cfgloop.h"
146#include "dumpfile.h"
147#include "print-rtl.h"
148#include "function-abi.h"
149
150#ifdef INSN_SCHEDULING
151
152/* True if we do register pressure relief through live-range
153 shrinkage. */
154static bool live_range_shrinkage_p;
155
156/* Switch on live range shrinkage. */
157void
158initialize_live_range_shrinkage (void)
159{
160 live_range_shrinkage_p = true;
161}
162
163/* Switch off live range shrinkage. */
164void
165finish_live_range_shrinkage (void)
166{
167 live_range_shrinkage_p = false;
168}
169
170/* issue_rate is the number of insns that can be scheduled in the same
171 machine cycle. It can be defined in the config/mach/mach.h file,
172 otherwise we set it to 1. */
173
174int issue_rate;
175
176/* This can be set to true by a backend if the scheduler should not
177 enable a DCE pass. */
178bool sched_no_dce;
179
180/* The current initiation interval used when modulo scheduling. */
181static int modulo_ii;
182
183/* The maximum number of stages we are prepared to handle. */
184static int modulo_max_stages;
185
186/* The number of insns that exist in each iteration of the loop. We use this
187 to detect when we've scheduled all insns from the first iteration. */
188static int modulo_n_insns;
189
190/* The current count of insns in the first iteration of the loop that have
191 already been scheduled. */
192static int modulo_insns_scheduled;
193
194/* The maximum uid of insns from the first iteration of the loop. */
195static int modulo_iter0_max_uid;
196
197/* The number of times we should attempt to backtrack when modulo scheduling.
198 Decreased each time we have to backtrack. */
199static int modulo_backtracks_left;
200
201/* The stage in which the last insn from the original loop was
202 scheduled. */
203static int modulo_last_stage;
204
205/* sched-verbose controls the amount of debugging output the
206 scheduler prints. It is controlled by -fsched-verbose=N:
207 N=0: no debugging output.
208 N=1: default value.
209 N=2: bb's probabilities, detailed ready list info, unit/insn info.
210 N=3: rtl at abort point, control-flow, regions info.
211 N=5: dependences info. */
212int sched_verbose = 0;
213
214/* Debugging file. All printouts are sent to dump. */
215FILE *sched_dump = 0;
216
217/* This is a placeholder for the scheduler parameters common
218 to all schedulers. */
219struct common_sched_info_def *common_sched_info;
220
221#define INSN_TICK(INSN) (HID (INSN)->tick)
222#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
223#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
224#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
225#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
226#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
227#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
228/* Cached cost of the instruction. Use insn_sched_cost to get cost of the
229 insn. -1 here means that the field is not initialized. */
230#define INSN_COST(INSN) (HID (INSN)->cost)
231
232/* If INSN_TICK of an instruction is equal to INVALID_TICK,
233 then it should be recalculated from scratch. */
234#define INVALID_TICK (-(max_insn_queue_index + 1))
235/* The minimal value of the INSN_TICK of an instruction. */
236#define MIN_TICK (-max_insn_queue_index)
237
238/* Original order of insns in the ready list.
239 Used to keep order of normal insns while separating DEBUG_INSNs. */
240#define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)
241
242/* The deciding reason for INSN's place in the ready list. */
243#define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)
244
245/* List of important notes we must keep around. This is a pointer to the
246 last element in the list. */
247rtx_insn *note_list;
248
249static struct spec_info_def spec_info_var;
250/* Description of the speculative part of the scheduling.
251 If NULL - no speculation. */
252spec_info_t spec_info = NULL;
253
254/* True, if recovery block was added during scheduling of current block.
255 Used to determine, if we need to fix INSN_TICKs. */
256static bool haifa_recovery_bb_recently_added_p;
257
258/* True, if recovery block was added during this scheduling pass.
259 Used to determine if we should have empty memory pools of dependencies
260 after finishing current region. */
261bool haifa_recovery_bb_ever_added_p;
262
263/* Counters of different types of speculative instructions. */
264static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
265
266/* Array used in {unlink, restore}_bb_notes. */
267static rtx_insn **bb_header = 0;
268
269/* Basic block after which recovery blocks will be created. */
270static basic_block before_recovery;
271
272/* Basic block just before the EXIT_BLOCK and after recovery, if we have
273 created it. */
274basic_block after_recovery;
275
276/* FALSE if we add bb to another region, so we don't need to initialize it. */
277bool adding_bb_to_current_region_p = true;
278
279/* Queues, etc. */
280
281/* An instruction is ready to be scheduled when all insns preceding it
282 have already been scheduled. It is important to ensure that all
283 insns which use its result will not be executed until its result
284 has been computed. An insn is maintained in one of four structures:
285
286 (P) the "Pending" set of insns which cannot be scheduled until
287 their dependencies have been satisfied.
288 (Q) the "Queued" set of insns that can be scheduled when sufficient
289 time has passed.
290 (R) the "Ready" list of unscheduled, uncommitted insns.
291 (S) the "Scheduled" list of insns.
292
293 Initially, all insns are either "Pending" or "Ready" depending on
294 whether their dependencies are satisfied.
295
296 Insns move from the "Ready" list to the "Scheduled" list as they
297 are committed to the schedule. As this occurs, the insns in the
298 "Pending" list have their dependencies satisfied and move to either
299 the "Ready" list or the "Queued" set depending on whether
300 sufficient time has passed to make them ready. As time passes,
301 insns move from the "Queued" set to the "Ready" list.
302
303 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
304 unscheduled insns, i.e., those that are ready, queued, and pending.
305 The "Queued" set (Q) is implemented by the variable `insn_queue'.
306 The "Ready" list (R) is implemented by the variables `ready' and
307 `n_ready'.
308 The "Scheduled" list (S) is the new insn chain built by this pass.
309
310 The transition (R->S) is implemented in the scheduling loop in
311 `schedule_block' when the best insn to schedule is chosen.
312 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
313 insns move from the ready list to the scheduled list.
314 The transition (Q->R) is implemented in 'queue_to_insn' as time
315 passes or stalls are introduced. */
316
317/* Implement a circular buffer to delay instructions until sufficient
318 time has passed. For the new pipeline description interface,
319 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
320 than maximal time of instruction execution computed by genattr.cc on
321 the base maximal time of functional unit reservations and getting a
322 result. This is the longest time an insn may be queued. */
323
324static rtx_insn_list **insn_queue;
325static int q_ptr = 0;
326static int q_size = 0;
327#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
328#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
329
330#define QUEUE_SCHEDULED (-3)
331#define QUEUE_NOWHERE (-2)
332#define QUEUE_READY (-1)
333/* QUEUE_SCHEDULED - INSN is scheduled.
334 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
335 queue or ready list.
336 QUEUE_READY - INSN is in ready list.
337 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
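Because max_insn_queue_index is a power of two minus one, both NEXT_Q and NEXT_Q_AFTER reduce wrap-around to a single mask operation. A stand-alone sketch of that circular-buffer arithmetic (toy int UIDs instead of rtx_insn_list chains):

#include <cstdio>
#include <vector>

int
main ()
{
  const int max_insn_queue_index = 7; /* 2^3 - 1, usable as a bit mask.  */
  std::vector<std::vector<int>> insn_queue (max_insn_queue_index + 1);
  int q_ptr = 0;

  /* Queue insn 42 to become ready in 5 cycles and insn 43 in 2 cycles,
     the analogue of indexing with NEXT_Q_AFTER (q_ptr, delay).  */
  insn_queue[(q_ptr + 5) & max_insn_queue_index].push_back (42);
  insn_queue[(q_ptr + 2) & max_insn_queue_index].push_back (43);

  /* Advance the clock; each cycle the slot at q_ptr dumps into the ready
     list, the Q->R transition described earlier.  */
  for (int cycle = 1; cycle <= 6; cycle++)
    {
      q_ptr = (q_ptr + 1) & max_insn_queue_index; /* NEXT_Q (q_ptr).  */
      for (int uid : insn_queue[q_ptr])
        std::printf ("cycle %d: insn %d becomes ready\n", cycle, uid);
      insn_queue[q_ptr].clear ();
    }
}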
338
339#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
340
341/* The following variable value refers to all current and future
342 reservations of the processor units. */
343state_t curr_state;
344
345/* The following variable value is the size of memory representing all
346 current and future reservations of the processor units. */
347size_t dfa_state_size;
348
349/* The following array is used to find the best insn from ready when
350 the automaton pipeline interface is used. */
351signed char *ready_try = NULL;
352
353/* The ready list. */
354struct ready_list ready = {NULL, 0, 0, 0, 0};
355
356/* The pointer to the ready list (to be removed). */
357static struct ready_list *readyp = &ready;
358
359/* Scheduling clock. */
360static int clock_var;
361
362/* Clock at which the previous instruction was issued. */
363static int last_clock_var;
364
365/* Set to true if, when queuing a shadow insn, we discover that it would be
366 scheduled too late. */
367static bool must_backtrack;
368
369/* The following variable value is number of essential insns issued on
 370 the current cycle. An insn is an essential one if it changes the
 371 processor's state. */
372int cycle_issued_insns;
373
374/* This records the actual schedule. It is built up during the main phase
375 of schedule_block, and afterwards used to reorder the insns in the RTL. */
376static vec<rtx_insn *> scheduled_insns;
377
378static int may_trap_exp (const_rtx, int);
379
380/* Nonzero iff the address is comprised from at most 1 register. */
381#define CONST_BASED_ADDRESS_P(x) \
382 (REG_P (x) \
383 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
384 || (GET_CODE (x) == LO_SUM)) \
385 && (CONSTANT_P (XEXP (x, 0)) \
386 || CONSTANT_P (XEXP (x, 1)))))
387
388/* Returns a class that insn with GET_DEST(insn)=x may belong to,
389 as found by analyzing insn's expression. */
390
391
392static int haifa_luid_for_non_insn (rtx x);
393
394/* Haifa version of sched_info hooks common to all headers. */
395const struct common_sched_info_def haifa_common_sched_info =
396 {
 397 NULL, /* fix_recovery_cfg */
 398 NULL, /* add_block */
 399 NULL, /* estimate_number_of_insns */
400 haifa_luid_for_non_insn, /* luid_for_non_insn */
401 SCHED_PASS_UNKNOWN /* sched_pass_id */
402 };
403
404/* Mapping from instruction UID to its Logical UID. */
405vec<int> sched_luids;
406
407/* Next LUID to assign to an instruction. */
408int sched_max_luid = 1;
409
410/* Haifa Instruction Data. */
411vec<haifa_insn_data_def> h_i_d;
412
413void (* sched_init_only_bb) (basic_block, basic_block);
414
415/* Split block function. Different schedulers might use different functions
 416 to keep their internal data consistent. */
417basic_block (* sched_split_block) (basic_block, rtx);
418
419/* Create empty basic block after the specified block. */
420basic_block (* sched_create_empty_bb) (basic_block);
421
422/* Return the number of cycles until INSN is expected to be ready.
423 Return zero if it already is. */
424static int
425insn_delay (rtx_insn *insn)
426{
 427 return MAX (INSN_TICK (insn) - clock_var, 0);
428}
429
430static int
431may_trap_exp (const_rtx x, int is_store)
432{
433 enum rtx_code code;
434
435 if (x == 0)
436 return TRAP_FREE;
 437 code = GET_CODE (x);
438 if (is_store)
439 {
440 if (code == MEM && may_trap_p (x))
441 return TRAP_RISKY;
442 else
443 return TRAP_FREE;
444 }
445 if (code == MEM)
446 {
447 /* The insn uses memory: a volatile load. */
 448 if (MEM_VOLATILE_P (x))
449 return IRISKY;
450 /* An exception-free load. */
451 if (!may_trap_p (x))
452 return IFREE;
453 /* A load with 1 base register, to be further checked. */
 454 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
455 return PFREE_CANDIDATE;
456 /* No info on the load, to be further checked. */
457 return PRISKY_CANDIDATE;
458 }
459 else
460 {
461 const char *fmt;
462 int i, insn_class = TRAP_FREE;
463
464 /* Neither store nor load, check if it may cause a trap. */
465 if (may_trap_p (x))
466 return TRAP_RISKY;
467 /* Recursive step: walk the insn... */
 468 fmt = GET_RTX_FORMAT (code);
 469 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
470 {
471 if (fmt[i] == 'e')
472 {
 473 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
 474 insn_class = WORST_CLASS (insn_class, tmp_class);
475 }
476 else if (fmt[i] == 'E')
477 {
478 int j;
 479 for (j = 0; j < XVECLEN (x, i); j++)
480 {
 481 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
 482 insn_class = WORST_CLASS (insn_class, tmp_class);
483 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
484 break;
485 }
486 }
487 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
488 break;
489 }
490 return insn_class;
491 }
492}
493
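The recursion in may_trap_exp above is an instance of a simple "worst class wins" walk: classes are ordered by increasing risk, a compound expression is classified as the maximum over its operands, and the loop exits early once the worst class is reached. A stand-alone toy version of the pattern (the enum is illustrative, not GCC's TRAP_*/IRISKY classes):

#include <algorithm>
#include <cstdio>
#include <vector>

enum risk_class { FREE_OP = 0, CHECKABLE_LOAD = 1, RISKY_TRAP = 2 };

struct expr
{
  risk_class own;             /* risk of this node in isolation */
  std::vector<expr> operands; /* sub-expressions */
};

static risk_class
classify (const expr &e)
{
  risk_class worst = e.own;
  for (const expr &op : e.operands)
    {
      worst = std::max (worst, classify (op)); /* WORST_CLASS analogue */
      if (worst == RISKY_TRAP)
        break; /* Nothing can make it worse; stop early.  */
    }
  return worst;
}

int
main ()
{
  /* A risk-free operation over one checkable load classifies as
     CHECKABLE_LOAD, the maximum over its parts.  */
  expr e {FREE_OP, {{CHECKABLE_LOAD, {}}, {FREE_OP, {}}}};
  std::printf ("class = %d\n", (int) classify (e));
}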
494/* Classifies rtx X of an insn for the purpose of verifying that X can be
495 executed speculatively (and consequently the insn can be moved
496 speculatively), by examining X, returning:
497 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
498 TRAP_FREE: non-load insn.
499 IFREE: load from a globally safe location.
500 IRISKY: volatile load.
501 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
502 being either PFREE or PRISKY. */
503
504static int
505haifa_classify_rtx (const_rtx x)
506{
507 int tmp_class = TRAP_FREE;
508 int insn_class = TRAP_FREE;
509 enum rtx_code code;
510
 511 if (GET_CODE (x) == PARALLEL)
 512 {
 513 int i, len = XVECLEN (x, 0);
514
515 for (i = len - 1; i >= 0; i--)
516 {
 517 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
 518 insn_class = WORST_CLASS (insn_class, tmp_class);
519 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
520 break;
521 }
522 }
523 else
524 {
 525 code = GET_CODE (x);
526 switch (code)
527 {
528 case CLOBBER:
529 /* Test if it is a 'store'. */
 530 tmp_class = may_trap_exp (XEXP (x, 0), 1);
531 break;
532 case SET:
533 /* Test if it is a store. */
 534 tmp_class = may_trap_exp (SET_DEST (x), 1);
535 if (tmp_class == TRAP_RISKY)
536 break;
537 /* Test if it is a load. */
538 tmp_class =
 539 WORST_CLASS (tmp_class,
 540 may_trap_exp (SET_SRC (x), 0));
541 break;
542 case COND_EXEC:
543 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x)(((x)->u.fld[1]).rt_rtx));
544 if (tmp_class == TRAP_RISKY)
545 break;
 546 tmp_class = WORST_CLASS (tmp_class,
 547 may_trap_exp (COND_EXEC_TEST (x), 0));
548 break;
549 case TRAP_IF:
550 tmp_class = TRAP_RISKY;
551 break;
552 default:;
553 }
554 insn_class = tmp_class;
555 }
556
557 return insn_class;
558}
559
560int
561haifa_classify_insn (const_rtx insn)
562{
563 return haifa_classify_rtx (PATTERN (insn));
564}
565
566/* After the scheduler initialization function has been called, this function
567 can be called to enable modulo scheduling. II is the initiation interval
568 we should use, it affects the delays for delay_pairs that were recorded as
569 separated by a given number of stages.
570
571 MAX_STAGES provides us with a limit
572 after which we give up scheduling; the caller must have unrolled at least
573 as many copies of the loop body and recorded delay_pairs for them.
574
575 INSNS is the number of real (non-debug) insns in one iteration of
576 the loop. MAX_UID can be used to test whether an insn belongs to
577 the first iteration of the loop; all of them have a uid lower than
578 MAX_UID. */
579void
580set_modulo_params (int ii, int max_stages, int insns, int max_uid)
581{
582 modulo_ii = ii;
583 modulo_max_stages = max_stages;
584 modulo_n_insns = insns;
585 modulo_iter0_max_uid = max_uid;
 586 modulo_backtracks_left = param_max_modulo_backtrack_attempts;
587}
588
589/* A structure to record a pair of insns where the first one is a real
590 insn that has delay slots, and the second is its delayed shadow.
591 I1 is scheduled normally and will emit an assembly instruction,
592 while I2 describes the side effect that takes place at the
593 transition between cycles CYCLES and (CYCLES + 1) after I1. */
594struct delay_pair
595{
596 struct delay_pair *next_same_i1;
597 rtx_insn *i1, *i2;
598 int cycles;
 599 /* When doing modulo scheduling, a delay_pair can also be used to
600 show that I1 and I2 are the same insn in a different stage. If that
601 is the case, STAGES will be nonzero. */
602 int stages;
603};
604
605/* Helpers for delay hashing. */
606
607struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
608{
609 typedef void *compare_type;
610 static inline hashval_t hash (const delay_pair *);
611 static inline bool equal (const delay_pair *, const void *);
612};
613
614/* Returns a hash value for X, based on hashing just I1. */
615
616inline hashval_t
617delay_i1_hasher::hash (const delay_pair *x)
618{
619 return htab_hash_pointer (x->i1);
620}
621
622/* Return true if I1 of pair X is the same as that of pair Y. */
623
624inline bool
625delay_i1_hasher::equal (const delay_pair *x, const void *y)
626{
627 return x->i1 == y;
628}
629
630struct delay_i2_hasher : free_ptr_hash <delay_pair>
631{
632 typedef void *compare_type;
633 static inline hashval_t hash (const delay_pair *);
634 static inline bool equal (const delay_pair *, const void *);
635};
636
637/* Returns a hash value for X, based on hashing just I2. */
638
639inline hashval_t
640delay_i2_hasher::hash (const delay_pair *x)
641{
642 return htab_hash_pointer (x->i2);
643}
644
645/* Return true if I2 of pair X is the same as that of pair Y. */
646
647inline bool
648delay_i2_hasher::equal (const delay_pair *x, const void *y)
649{
650 return x->i2 == y;
651}
652
653/* Two hash tables to record delay_pairs, one indexed by I1 and the other
654 indexed by I2. */
655static hash_table<delay_i1_hasher> *delay_htab;
656static hash_table<delay_i2_hasher> *delay_htab_i2;
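Keeping two tables over the same delay_pair objects makes lookup cheap from either endpoint: a shadow finds its real insn through I2, and a real insn finds all of its shadows through I1. A rough stand-alone analogue with standard containers (toy int UIDs; GCC uses its own hash_table with pointer hashing):

#include <cstdio>
#include <unordered_map>

struct pair_rec { int i1, i2, cycles; };

int
main ()
{
  /* One real insn may have several shadows; a shadow has one real insn.  */
  std::unordered_multimap<int, pair_rec *> by_i1;
  std::unordered_map<int, pair_rec *> by_i2;

  static pair_rec p = {10, 11, 3};
  by_i1.emplace (p.i1, &p);
  by_i2.emplace (p.i2, &p);

  /* The analogue of real_insn_for_shadow (11).  */
  auto it = by_i2.find (11);
  if (it != by_i2.end ())
    std::printf ("shadow %d follows insn %d after %d cycles\n",
                 it->second->i2, it->second->i1, it->second->cycles);
}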
657
658/* Called through htab_traverse. Walk the hashtable using I2 as
659 index, and delete all elements involving an UID higher than
660 that pointed to by *DATA. */
661int
662haifa_htab_i2_traverse (delay_pair **slot, int *data)
663{
664 int maxuid = *data;
665 struct delay_pair *p = *slot;
666 if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
667 {
668 delay_htab_i2->clear_slot (slot);
669 }
670 return 1;
671}
672
673/* Called through htab_traverse. Walk the hashtable using I1 as
674 index, and delete all elements involving an UID higher than
675 that pointed to by *DATA. */
676int
677haifa_htab_i1_traverse (delay_pair **pslot, int *data)
678{
679 int maxuid = *data;
680 struct delay_pair *p, *first, **pprev;
681
682 if (INSN_UID ((*pslot)->i1) >= maxuid)
683 {
684 delay_htab->clear_slot (pslot);
685 return 1;
686 }
687 pprev = &first;
688 for (p = *pslot; p; p = p->next_same_i1)
689 {
690 if (INSN_UID (p->i2) < maxuid)
691 {
692 *pprev = p;
693 pprev = &p->next_same_i1;
694 }
695 }
 696 *pprev = NULL;
 697 if (first == NULL)
698 delay_htab->clear_slot (pslot);
699 else
700 *pslot = first;
701 return 1;
702}
703
704/* Discard all delay pairs which involve an insn with an UID higher
705 than MAX_UID. */
706void
707discard_delay_pairs_above (int max_uid)
708{
709 delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
710 delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
711}
712
713/* This function can be called by a port just before it starts the final
714 scheduling pass. It records the fact that an instruction with delay
715 slots has been split into two insns, I1 and I2. The first one will be
716 scheduled normally and initiates the operation. The second one is a
717 shadow which must follow a specific number of cycles after I1; its only
718 purpose is to show the side effect that occurs at that cycle in the RTL.
719 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
720 while I2 retains the original insn type.
721
722 There are two ways in which the number of cycles can be specified,
723 involving the CYCLES and STAGES arguments to this function. If STAGES
724 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
725 which is multiplied by MODULO_II to give the number of cycles. This is
726 only useful if the caller also calls set_modulo_params to enable modulo
727 scheduling. */
728
729void
730record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
731{
 732 struct delay_pair *p = XNEW (struct delay_pair);
733 struct delay_pair **slot;
734
735 p->i1 = i1;
736 p->i2 = i2;
737 p->cycles = cycles;
738 p->stages = stages;
739
740 if (!delay_htab)
741 {
742 delay_htab = new hash_table<delay_i1_hasher> (10);
743 delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
744 }
745 slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
746 p->next_same_i1 = *slot;
747 *slot = p;
748 slot = delay_htab_i2->find_slot (p, INSERT);
749 *slot = p;
750}
751
752/* Examine the delay pair hashtable to see if INSN is a shadow for another,
753 and return the other insn if so. Return NULL otherwise. */
754rtx_insn *
755real_insn_for_shadow (rtx_insn *insn)
756{
757 struct delay_pair *pair;
758
759 if (!delay_htab)
 760 return NULL;
761
762 pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
763 if (!pair || pair->stages > 0)
 764 return NULL;
765 return pair->i1;
766}
767
768/* For a pair P of insns, return the fixed distance in cycles from the first
769 insn after which the second must be scheduled. */
770static int
771pair_delay (struct delay_pair *p)
772{
773 if (p->stages == 0)
774 return p->cycles;
775 else
776 return p->stages * modulo_ii;
777}
778
779/* Given an insn INSN, add a dependence on its delayed shadow if it
780 has one. Also try to find situations where shadows depend on each other
781 and add dependencies to the real insns to limit the amount of backtracking
782 needed. */
783void
784add_delay_dependencies (rtx_insn *insn)
785{
786 struct delay_pair *pair;
787 sd_iterator_def sd_it;
788 dep_t dep;
789
790 if (!delay_htab)
791 return;
792
793 pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
794 if (!pair)
795 return;
796 add_dependence (insn, pair->i1, REG_DEP_ANTI);
797 if (pair->stages)
798 return;
799
 800 FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
801 {
 802 rtx_insn *pro = DEP_PRO (dep);
803 struct delay_pair *other_pair
804 = delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
805 if (!other_pair || other_pair->stages)
806 continue;
807 if (pair_delay (other_pair) >= pair_delay (pair))
808 {
809 if (sched_verbose >= 4)
810 {
811 fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
812 INSN_UID (other_pair->i1),
813 INSN_UID (pair->i1));
814 fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
815 INSN_UID (pair->i1),
816 INSN_UID (pair->i2),
817 pair_delay (pair));
818 fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
819 INSN_UID (other_pair->i1),
820 INSN_UID (other_pair->i2),
821 pair_delay (other_pair));
822 }
823 add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
824 }
825 }
826}
827
828/* Forward declarations. */
829
830static int priority (rtx_insn *, bool force_recompute = false);
831static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
832static int rank_for_schedule (const void *, const void *);
833static void swap_sort (rtx_insn **, int);
834static void queue_insn (rtx_insn *, int, const char *);
835static int schedule_insn (rtx_insn *);
836static void adjust_priority (rtx_insn *);
837static void advance_one_cycle (void);
838static void extend_h_i_d (void);
839
840
841/* Notes handling mechanism:
842 =========================
843 Generally, NOTES are saved before scheduling and restored after scheduling.
844 The scheduler distinguishes between two types of notes:
845
846 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
847 Before scheduling a region, a pointer to the note is added to the insn
848 that follows or precedes it. (This happens as part of the data dependence
849 computation). After scheduling an insn, the pointer contained in it is
850 used for regenerating the corresponding note (in reemit_notes).
851
852 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
853 these notes are put in a list (in rm_other_notes() and
854 unlink_other_notes ()). After scheduling the block, these notes are
855 inserted at the beginning of the block (in schedule_block()). */
856
857static void ready_add (struct ready_list *, rtx_insn *, bool);
858static rtx_insn *ready_remove_first (struct ready_list *);
859static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);
860
861static void queue_to_ready (struct ready_list *);
862static int early_queue_to_ready (state_t, struct ready_list *);
863
864/* The following functions are used to implement multi-pass scheduling
865 on the first cycle. */
866static rtx_insn *ready_remove (struct ready_list *, int);
867static void ready_remove_insn (rtx_insn *);
868
869static void fix_inter_tick (rtx_insn *, rtx_insn *);
870static int fix_tick_ready (rtx_insn *);
871static void change_queue_index (rtx_insn *, int);
872
873/* The following functions are used to implement scheduling of data/control
874 speculative instructions. */
875
876static void extend_h_i_d (void);
877static void init_h_i_d (rtx_insn *);
878static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
879static void generate_recovery_code (rtx_insn *);
880static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
881static void begin_speculative_block (rtx_insn *);
882static void add_to_speculative_block (rtx_insn *);
883static void init_before_recovery (basic_block *);
884static void create_check_block_twin (rtx_insn *, bool);
885static void fix_recovery_deps (basic_block);
886static bool haifa_change_pattern (rtx_insn *, rtx);
887static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
888static void restore_bb_notes (basic_block);
889static void fix_jump_move (rtx_insn *);
890static void move_block_after_check (rtx_insn *);
891static void move_succs (vec<edge, va_gc> **, basic_block);
892static void sched_remove_insn (rtx_insn *);
893static void clear_priorities (rtx_insn *, rtx_vec_t *);
894static void calc_priorities (const rtx_vec_t &);
895static void add_jump_dependencies (rtx_insn *, rtx_insn *);
896
897#endif /* INSN_SCHEDULING */
898
899/* Point to state used for the current scheduling pass. */
900struct haifa_sched_info *current_sched_info;
901
902#ifndef INSN_SCHEDULING
903void
904schedule_insns (void)
905{
906}
907#else
908
909/* Do register pressure sensitive insn scheduling if the flag is set
910 up. */
911enum sched_pressure_algorithm sched_pressure;
912
913/* Map regno -> its pressure class. The map defined only when
914 SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
915enum reg_class *sched_regno_pressure_class;
916
917/* The current register pressure. Only elements corresponding pressure
918 classes are defined. */
919static int curr_reg_pressure[N_REG_CLASSES];
920
921/* Saved value of the previous array. */
922static int saved_reg_pressure[N_REG_CLASSES];
923
924/* Register living at given scheduling point. */
925static bitmap curr_reg_live;
926
927/* Saved value of the previous array. */
928static bitmap saved_reg_live;
929
930/* Registers mentioned in the current region. */
931static bitmap region_ref_regs;
932
933/* Temporary bitmap used for SCHED_PRESSURE_MODEL. */
934static bitmap tmp_bitmap;
935
936/* Effective number of available registers of a given class (see comment
937 in sched_pressure_start_bb). */
938static int sched_class_regs_num[N_REG_CLASSES];
939/* The number of registers that the function would need to save before it
 940 uses them, and the number of fixed_regs. Helpers for calculating
941 sched_class_regs_num. */
942static int call_saved_regs_num[N_REG_CLASSES];
943static int fixed_regs_num[N_REG_CLASSES];
944
945/* Initiate register pressure relative info for scheduling the current
 946 region. Currently it only clears registers mentioned in the
947 current region. */
948void
949sched_init_region_reg_pressure_info (void)
950{
951 bitmap_clear (region_ref_regs);
952}
953
954/* PRESSURE[CL] describes the pressure on register class CL. Update it
955 for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
956 LIVE tracks the set of live registers; if it is null, assume that
957 every birth or death is genuine. */
958static inline void
959mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
960{
961 enum reg_class pressure_class;
962
963 pressure_class = sched_regno_pressure_class[regno];
 964 if (regno >= FIRST_PSEUDO_REGISTER)
965 {
966 if (pressure_class != NO_REGS)
967 {
968 if (birth_p)
969 {
970 if (!live || bitmap_set_bit (live, regno))
971 pressure[pressure_class]
 972 += (ira_reg_class_max_nregs
 973 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
974 }
975 else
976 {
977 if (!live || bitmap_clear_bit (live, regno))
978 pressure[pressure_class]
 979 -= (ira_reg_class_max_nregs
 980 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
981 }
982 }
983 }
984 else if (pressure_class != NO_REGS
 985 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
986 {
987 if (birth_p)
988 {
989 if (!live || bitmap_set_bit (live, regno))
990 pressure[pressure_class]++;
991 }
992 else
993 {
994 if (!live || bitmap_clear_bit (live, regno))
995 pressure[pressure_class]--;
996 }
997 }
998}
999
1000/* Initiate current register pressure related info from living
1001 registers given by LIVE. */
1002static void
1003initiate_reg_pressure_info (bitmap live)
1004{
1005 int i;
1006 unsigned int j;
1007 bitmap_iterator bi;
1008
 1009 for (i = 0; i < ira_pressure_classes_num; i++)
 1010 curr_reg_pressure[ira_pressure_classes[i]] = 0;
1011 bitmap_clear (curr_reg_live);
 1012 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
1013 if (sched_pressure == SCHED_PRESSURE_MODEL
1014 || current_nr_blocks == 1
1015 || bitmap_bit_p (region_ref_regs, j))
1016 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
1017}
1018
1019/* Mark registers in X as mentioned in the current region. */
1020static void
1021setup_ref_regs (rtx x)
1022{
1023 int i, j;
 1024 const RTX_CODE code = GET_CODE (x);
1025 const char *fmt;
1026
 1027 if (REG_P (x))
1028 {
 1029 bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
1030 return;
1031 }
 1032 fmt = GET_RTX_FORMAT (code);
 1033 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1034 if (fmt[i] == 'e')
 1035 setup_ref_regs (XEXP (x, i));
1036 else if (fmt[i] == 'E')
1037 {
 1038 for (j = 0; j < XVECLEN (x, i); j++)
 1039 setup_ref_regs (XVECEXP (x, i, j));
1040 }
1041}
1042
1043/* Initiate current register pressure related info at the start of
1044 basic block BB. */
1045static void
1046initiate_bb_reg_pressure_info (basic_block bb)
1047{
 1048 unsigned int i ATTRIBUTE_UNUSED;
1049 rtx_insn *insn;
1050
1051 if (current_nr_blocks > 1)
 1052 FOR_BB_INSNS (bb, insn)
 1053 if (NONDEBUG_INSN_P (insn))
1054 setup_ref_regs (PATTERN (insn));
1055 initiate_reg_pressure_info (df_get_live_in (bb));
1056 if (bb_has_eh_pred (bb))
1057 for (i = 0; ; ++i)
1058 {
 1059 unsigned int regno = EH_RETURN_DATA_REGNO (i);
1060
 1061 if (regno == INVALID_REGNUM)
1062 break;
1063 if (! bitmap_bit_p (df_get_live_in (bb), regno))
1064 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
1065 regno, true);
1066 }
1067}
1068
1069/* Save current register pressure related info. */
1070static void
1071save_reg_pressure (void)
1072{
1073 int i;
1074
 1075 for (i = 0; i < ira_pressure_classes_num; i++)
 1076 saved_reg_pressure[ira_pressure_classes[i]]
 1077 = curr_reg_pressure[ira_pressure_classes[i]];
1078 bitmap_copy (saved_reg_live, curr_reg_live);
1079}
1080
1081/* Restore saved register pressure related info. */
1082static void
1083restore_reg_pressure (void)
1084{
1085 int i;
1086
 1087 for (i = 0; i < ira_pressure_classes_num; i++)
 1088 curr_reg_pressure[ira_pressure_classes[i]]
 1089 = saved_reg_pressure[ira_pressure_classes[i]];
1090 bitmap_copy (curr_reg_live, saved_reg_live);
1091}
1092
1093/* Return TRUE if the register is dying after its USE. */
1094static bool
1095dying_use_p (struct reg_use_data *use)
1096{
1097 struct reg_use_data *next;
1098
1099 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
 1100 if (NONDEBUG_INSN_P (next->insn)
 1101 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1102 return false;
1103 return true;
1104}
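The loop in dying_use_p relies on all uses of a regno being linked into a circular next_regno_use chain: starting at USE and stopping on return to it visits every other use exactly once. A stand-alone model of that walk, where a plain scheduled flag stands in for the QUEUE_INDEX != QUEUE_SCHEDULED test (types here are illustrative, not GCC's reg_use_data):

#include <cstdio>

struct use_rec
{
  int insn_uid;
  bool scheduled;          /* stand-in for QUEUE_INDEX == QUEUE_SCHEDULED */
  use_rec *next_regno_use; /* circular list of uses of one register */
};

static bool
dying_use (const use_rec *use)
{
  for (const use_rec *n = use->next_regno_use; n != use;
       n = n->next_regno_use)
    if (!n->scheduled)
      return false; /* Another use still pending: register stays live.  */
  return true;
}

int
main ()
{
  use_rec a = {1, false, nullptr};
  use_rec b = {2, true, nullptr};
  use_rec c = {3, true, nullptr};
  a.next_regno_use = &b;
  b.next_regno_use = &c;
  c.next_regno_use = &a;
  std::printf ("use in insn %d dying? %d\n", a.insn_uid,
               (int) dying_use (&a));
}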
1105
1106/* Print info about the current register pressure and its excess for
1107 each pressure class. */
1108static void
1109print_curr_reg_pressure (void)
1110{
1111 int i;
1112 enum reg_class cl;
1113
1114 fprintf (sched_dump, ";;\t");
 1115 for (i = 0; i < ira_pressure_classes_num; i++)
 1116 {
 1117 cl = ira_pressure_classes[i];
 1118 gcc_assert (curr_reg_pressure[cl] >= 0);
1119 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
1120 curr_reg_pressure[cl],
1121 curr_reg_pressure[cl] - sched_class_regs_num[cl]);
1122 }
1123 fprintf (sched_dump, "\n");
1124}
1125
1126/* Determine if INSN has a condition that is clobbered if a register
1127 in SET_REGS is modified. */
1128static bool
1129cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
1130{
1131 rtx pat = PATTERN (insn);
 1132 gcc_assert (GET_CODE (pat) == COND_EXEC);
 1133 if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1134 {
1135 sd_iterator_def sd_it;
1136 dep_t dep;
 1137 haifa_change_pattern (insn, ORIG_PAT (insn));
 1138 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
 1139 DEP_STATUS (dep) &= ~DEP_CANCELLED;
 1140 TODO_SPEC (insn) = HARD_DEP;
1141 if (sched_verbose >= 2)
1142 fprintf (sched_dump,
1143 ";;\t\tdequeue insn %s because of clobbered condition\n",
1144 (*current_sched_info->print_insn) (insn, 0));
1145 return true;
1146 }
1147
1148 return false;
1149}
1150
1151/* This function should be called after modifying the pattern of INSN,
1152 to update scheduler data structures as needed. */
1153static void
1154update_insn_after_change (rtx_insn *insn)
1155{
1156 sd_iterator_def sd_it;
1157 dep_t dep;
1158
1159 dfa_clear_single_insn_cache (insn);
1160
1161 sd_it = sd_iterator_start (insn,
 1162 SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
1163 while (sd_iterator_cond (&sd_it, &dep))
1164 {
 1165 DEP_COST (dep) = UNKNOWN_DEP_COST;
1166 sd_iterator_next (&sd_it);
1167 }
1168
1169 /* Invalidate INSN_COST, so it'll be recalculated. */
 1170 INSN_COST (insn) = -1;
1171 /* Invalidate INSN_TICK, so it'll be recalculated. */
 1172 INSN_TICK (insn) = INVALID_TICK;
1173
1174 /* Invalidate autoprefetch data entry. */
 1175 INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
 1176 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
 1177 INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
 1178 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
1179}
1180
1181
1182/* Two VECs, one to hold dependencies for which pattern replacements
1183 need to be applied or restored at the start of the next cycle, and
1184 another to hold an integer that is either one, to apply the
1185 corresponding replacement, or zero to restore it. */
1186static vec<dep_t> next_cycle_replace_deps;
1187static vec<int> next_cycle_apply;
1188
1189static void apply_replacement (dep_t, bool);
1190static void restore_pattern (dep_t, bool);
1191
1192/* Look at the remaining dependencies for insn NEXT, and compute and return
1193 the TODO_SPEC value we should use for it. This is called after one of
1194 NEXT's dependencies has been resolved.
1195 We also perform pattern replacements for predication, and for broken
1196 replacement dependencies. The latter is only done if FOR_BACKTRACK is
1197 false. */
1198
1199static ds_t
1200recompute_todo_spec (rtx_insn *next, bool for_backtrack)
1201{
1202 ds_t new_ds;
1203 sd_iterator_def sd_it;
 1204 dep_t dep, modify_dep = NULL;
1205 int n_spec = 0;
1206 int n_control = 0;
1207 int n_replace = 0;
1208 bool first_p = true;
1209
 1210 if (sd_lists_empty_p (next, SD_LIST_BACK))
 1211 /* NEXT has all its dependencies resolved. */
 1212 return 0;
 1213
 1214 if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
 1215 return HARD_DEP;
 1216
 1217 /* If NEXT is intended to sit adjacent to this instruction, we don't
 1218 want to try to break any dependencies. Treat it as a HARD_DEP. */
 1219 if (SCHED_GROUP_P (next))
 1220 return HARD_DEP;
1221
1222 /* Now we've got NEXT with speculative deps only.
1223 1. Look at the deps to see what we have to do.
1224 2. Check if we can do 'todo'. */
1225 new_ds = 0;
1226
 1227 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1228 {
 1229 rtx_insn *pro = DEP_PRO (dep);
 1230 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1231
 1232 if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
1233 continue;
1234
1235 if (ds)
1236 {
1237 n_spec++;
1238 if (first_p)
1239 {
1240 first_p = false;
1241
1242 new_ds = ds;
1243 }
1244 else
1245 new_ds = ds_merge (new_ds, ds);
1246 }
 1247 else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1248 {
 1249 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1250 {
1251 n_control++;
1252 modify_dep = dep;
1253 }
 1254 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1255 }
 1256 else if (DEP_REPLACE (dep) != NULL)
1257 {
 1258 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1259 {
1260 n_replace++;
1261 modify_dep = dep;
1262 }
 1263 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1264 }
1265 }
1266
1267 if (n_replace > 0 && n_control == 0 && n_spec == 0)
1268 {
1269 if (!dbg_cnt (sched_breakdep))
 1270 return HARD_DEP;
 1271 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1272 {
 1273 struct dep_replacement *desc = DEP_REPLACE (dep);
 1274 if (desc != NULL)
1275 {
1276 if (desc->insn == next && !for_backtrack)
1277 {
 1278 gcc_assert (n_replace == 1);
1279 apply_replacement (dep, true);
1280 }
 1281 DEP_STATUS (dep) |= DEP_CANCELLED;
1282 }
1283 }
1284 return 0;
1285 }
1286
1287 else if (n_control == 1 && n_replace == 0 && n_spec == 0)
1288 {
1289 rtx_insn *pro, *other;
1290 rtx new_pat;
 1291 rtx cond = NULL_RTX;
1292 bool success;
 1293 rtx_insn *prev = NULL;
1294 int i;
1295 unsigned regno;
1296
1297 if ((current_sched_info->flags & DO_PREDICATION) == 0
 1298 || (ORIG_PAT (next) != NULL_RTX
 1299 && PREDICATED_PAT (next) == NULL_RTX))
 1300 return HARD_DEP;
1301
 1302 pro = DEP_PRO (modify_dep);
1303 other = real_insn_for_shadow (pro);
 1304 if (other != NULL_RTX)
1305 pro = other;
1306
1307 cond = sched_get_reverse_condition_uncached (pro);
 1308 regno = REGNO (XEXP (cond, 0));
1309
1310 /* Find the last scheduled insn that modifies the condition register.
1311 We can stop looking once we find the insn we depend on through the
1312 REG_DEP_CONTROL; if the condition register isn't modified after it,
1313 we know that it still has the right value. */
 1314 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
 1315 FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
1316 {
1317 HARD_REG_SET t;
1318
1319 find_all_hard_reg_sets (prev, &t, true);
1320 if (TEST_HARD_REG_BIT (t, regno))
1321 return HARD_DEP(((((((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4
) - 8) / 4))) << 1) << 1) << 1) << 1)
;
1322 if (prev == pro)
1323 break;
1324 }
1325 if (ORIG_PAT (next)((&h_i_d[INSN_UID (next)])->orig_pat) == NULL_RTX(rtx) 0)
1326 {
1327 ORIG_PAT (next)((&h_i_d[INSN_UID (next)])->orig_pat) = PATTERN (next);
1328
1329 new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next))gen_rtx_fmt_ee_stat ((COND_EXEC), ((((void) 0, E_VOIDmode))),
((cond)), ((PATTERN (next))) )
;
1330 success = haifa_change_pattern (next, new_pat);
1331 if (!success)
1332 return HARD_DEP(((((((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4
) - 8) / 4))) << 1) << 1) << 1) << 1)
;
1333 PREDICATED_PAT (next)((&h_i_d[INSN_UID (next)])->predicated_pat) = new_pat;
1334 }
1335 else if (PATTERN (next) != PREDICATED_PAT (next)((&h_i_d[INSN_UID (next)])->predicated_pat))
1336 {
1337 bool success = haifa_change_pattern (next,
1338 PREDICATED_PAT (next)((&h_i_d[INSN_UID (next)])->predicated_pat));
1339 gcc_assert (success)((void)(!(success) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 1339, __FUNCTION__), 0 : 0))
;
1340 }
1341 DEP_STATUS (modify_dep)((modify_dep)->status) |= DEP_CANCELLED(((((((((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 *
4) - 8) / 4))) << 1) << 1) << 1) << 1
) << 1) << 1)
;
1342 return DEP_CONTROL((((((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4
) - 8) / 4))) << 1) << 1) << 1)
;
1343 }
1344
1345  if (PREDICATED_PAT (next) != NULL_RTX)
1346    {
1347      int tick = INSN_TICK (next);
1348      bool success = haifa_change_pattern (next,
1349                                           ORIG_PAT (next));
1350      INSN_TICK (next) = tick;
1351      gcc_assert (success);
1352    }
1353
1354  /* We can't handle the case where there are both speculative and control
1355     dependencies, so we return HARD_DEP in such a case.  Also fail if
1356     we have speculative dependencies with not enough points, or more than
1357     one control dependency.  */
1358  if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
1359      || (n_spec > 0
1360          /* Too few points?  */
1361          && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1362      || n_control > 0
1363      || n_replace > 0)
1364    return HARD_DEP;
1365
1366 return new_ds;
1367}
1368
1369/* Pointer to the last instruction scheduled. */
1370static rtx_insn *last_scheduled_insn;
1371
1372/* Pointer to the last nondebug instruction scheduled within the
1373 block, or the prev_head of the scheduling block. Used by
1374 rank_for_schedule, so that insns independent of the last scheduled
1375 insn will be preferred over dependent instructions. */
1376static rtx_insn *last_nondebug_scheduled_insn;
1377
1378/* Pointer that iterates through the list of unscheduled insns if we
1379 have a dbg_cnt enabled. It always points at an insn prior to the
1380 first unscheduled one. */
1381static rtx_insn *nonscheduled_insns_begin;
1382
1383/* Compute cost of executing INSN.
1384 This is the number of cycles between instruction issue and
1385 instruction results. */
1386int
1387insn_sched_cost (rtx_insn *insn)
1388{
1389 int cost;
1390
1391 if (sched_fusion)
1392 return 0;
1393
1394 if (sel_sched_p ())
1395 {
1396 if (recog_memoized (insn) < 0)
1397 return 0;
1398
1399 cost = insn_default_latency (insn);
1400 if (cost < 0)
1401 cost = 0;
1402
1403 return cost;
1404 }
1405
1406  cost = INSN_COST (insn);
1407
1408 if (cost < 0)
1409 {
1410 /* A USE insn, or something else we don't need to
1411 understand. We can't pass these directly to
1412 result_ready_cost or insn_default_latency because it will
1413 trigger a fatal error for unrecognizable insns. */
1414 if (recog_memoized (insn) < 0)
1415 {
1416          INSN_COST (insn) = 0;
1417 return 0;
1418 }
1419 else
1420 {
1421 cost = insn_default_latency (insn);
1422 if (cost < 0)
1423 cost = 0;
1424
1425          INSN_COST (insn) = cost;
1426 }
1427 }
1428
1429 return cost;
1430}
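
The negative-cost check above is a simple lazy-memoization idiom: INSN_COST starts out negative and is filled in on first use. A minimal stand-alone sketch of the same pattern (the cache and compute_latency below are illustrative stand-ins, not the scheduler's own data structures):

#include <unordered_map>

/* Hypothetical stand-ins: a per-insn cost cache and a latency query.  */
static std::unordered_map<int, int> cost_cache;   /* uid -> cost; absent = unknown.  */
static int compute_latency (int uid) { return uid % 4; }   /* placeholder */

/* Mirror of the caching logic: a missing entry (like a negative
   INSN_COST) means "not computed yet"; clamp negative latencies to
   zero and remember the result so later queries are O(1).  */
static int cached_cost (int uid)
{
  auto it = cost_cache.find (uid);
  if (it != cost_cache.end ())
    return it->second;
  int cost = compute_latency (uid);
  if (cost < 0)
    cost = 0;
  cost_cache[uid] = cost;
  return cost;
}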
1431
1432/* Compute cost of dependence LINK.
1433 This is the number of cycles between instruction issue and
1434 instruction results.
1435 ??? We also use this function to call recog_memoized on all insns. */
1436int
1437dep_cost_1 (dep_t link, dw_t dw)
1438{
1439  rtx_insn *insn = DEP_PRO (link);
1440  rtx_insn *used = DEP_CON (link);
1441 int cost;
1442
1443  if (DEP_COST (link) != UNKNOWN_DEP_COST)
1444    return DEP_COST (link);
1445
1446 if (delay_htab)
1447 {
1448 struct delay_pair *delay_entry;
1449 delay_entry
1450 = delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
1451 if (delay_entry)
1452 {
1453 if (delay_entry->i1 == insn)
1454 {
1455              DEP_COST (link) = pair_delay (delay_entry);
1456              return DEP_COST (link);
1457 }
1458 }
1459 }
1460
1461 /* A USE insn should never require the value used to be computed.
1462 This allows the computation of a function's result and parameter
1463 values to overlap the return and call. We don't care about the
1464 dependence cost when only decreasing register pressure. */
1465 if (recog_memoized (used) < 0)
1466 {
1467 cost = 0;
1468 recog_memoized (insn);
1469 }
1470 else
1471 {
1472      enum reg_note dep_type = DEP_TYPE (link);
1473
1474 cost = insn_sched_cost (insn);
1475
1476      if (INSN_CODE (insn) >= 0)
1477 {
1478 if (dep_type == REG_DEP_ANTI)
1479 cost = 0;
1480 else if (dep_type == REG_DEP_OUTPUT)
1481 {
1482 cost = (insn_default_latency (insn)
1483 - insn_default_latency (used));
1484 if (cost <= 0)
1485 cost = 1;
1486 }
1487 else if (bypass_p (insn))
1488 cost = insn_latency (insn, used);
1489 }
1490
1491
1492 if (targetm.sched.adjust_cost)
1493 cost = targetm.sched.adjust_cost (used, (int) dep_type, insn, cost,
1494 dw);
1495
1496 if (cost < 0)
1497 cost = 0;
1498 }
1499
1500  DEP_COST (link) = cost;
1501 return cost;
1502}
1503
1504/* Compute cost of dependence LINK.
1505 This is the number of cycles between instruction issue and
1506 instruction results. */
1507int
1508dep_cost (dep_t link)
1509{
1510 return dep_cost_1 (link, 0);
1511}
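
A condensed restatement of the per-dependence-type rules in dep_cost_1 above (a sketch for illustration only; the latency inputs are made up and the bypass_p case is omitted):

enum toy_dep_type { TOY_DEP_TRUE, TOY_DEP_ANTI, TOY_DEP_OUTPUT };

/* producer_latency / consumer_latency stand in for
   insn_default_latency at the two ends of the dependence.  */
static int toy_dep_cost (toy_dep_type type,
                         int producer_latency, int consumer_latency)
{
  if (type == TOY_DEP_ANTI)
    return 0;                   /* Anti dependences are free.  */
  if (type == TOY_DEP_OUTPUT)
    {
      /* Output dependences cost the latency difference, but at
         least one cycle.  */
      int cost = producer_latency - consumer_latency;
      return cost <= 0 ? 1 : cost;
    }
  return producer_latency;      /* True dependence: full latency.  */
}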
1512
1513/* Use this sel-sched.cc friendly function in reorder2 instead of increasing
1514 INSN_PRIORITY explicitly. */
1515void
1516increase_insn_priority (rtx_insn *insn, int amount)
1517{
1518 if (!sel_sched_p ())
1519 {
1520 /* We're dealing with haifa-sched.cc INSN_PRIORITY. */
1521      if (INSN_PRIORITY_KNOWN (insn))
1522        INSN_PRIORITY (insn) += amount;
1523 }
1524 else
1525 {
1526 /* In sel-sched.cc INSN_PRIORITY is not kept up to date.
1527 Use EXPR_PRIORITY instead. */
1528 sel_add_to_insn_priority (insn, amount);
1529 }
1530}
1531
1532/* Return 'true' if DEP should be included in priority calculations. */
1533static bool
1534contributes_to_priority_p (dep_t dep)
1535{
1536  if (DEBUG_INSN_P (DEP_CON (dep))
1537      || DEBUG_INSN_P (DEP_PRO (dep)))
1538    return false;
1539
1540 /* Critical path is meaningful in block boundaries only. */
1541  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1542                                                    DEP_PRO (dep)))
1543 return false;
1544
1545  if (DEP_REPLACE (dep) != NULL)
1546 return false;
1547
1548 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1549 then speculative instructions will less likely be
1550 scheduled. That is because the priority of
1551 their producers will increase, and, thus, the
1552 producers will more likely be scheduled, thus,
1553 resolving the dependence. */
1554 if (sched_deps_info->generate_spec_deps
1555 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1556      && (DEP_STATUS (dep) & SPECULATIVE))
1557    return false;
1558
1559 return true;
1560}
1561
1562/* Compute the number of nondebug deps in list LIST for INSN. */
1563
1564static int
1565dep_list_size (rtx_insn *insn, sd_list_types_def list)
1566{
1567 sd_iterator_def sd_it;
1568 dep_t dep;
1569 int dbgcount = 0, nodbgcount = 0;
1570
1571  if (!MAY_HAVE_DEBUG_INSNS)
1572    return sd_lists_size (insn, list);
1573
1574  FOR_EACH_DEP (insn, list, sd_it, dep)
1575    {
1576      if (DEBUG_INSN_P (DEP_CON (dep)))
1577        dbgcount++;
1578      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1579        nodbgcount++;
1580    }
1581
1582  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1583
1584 return nodbgcount;
1585}
1586
1587bool sched_fusion;
1588
1589/* Compute the priority number for INSN. */
1590static int
1591priority (rtx_insn *insn, bool force_recompute)
1592{
1593  if (! INSN_P (insn))
1594    return 0;
1595
1596  /* We should not be interested in priority of an already scheduled insn.  */
1597  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1598
1599  if (force_recompute || !INSN_PRIORITY_KNOWN (insn))
1600    {
1601      int this_priority = -1;
1602
1603      if (sched_fusion)
1604        {
1605          int this_fusion_priority;
1606
1607          targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
1608                                         &this_fusion_priority, &this_priority);
1609          INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
1610        }
1611      else if (dep_list_size (insn, SD_LIST_FORW) == 0)
1612        /* ??? We should set INSN_PRIORITY to insn_sched_cost when an insn
1613           has some forward deps but all of them are ignored by
1614           contributes_to_priority hook.  At the moment we set priority of
1615           such an insn to 0.  */
1616        this_priority = insn_sched_cost (insn);
1617      else
1618        {
1619          rtx_insn *prev_first, *twin;
1620          basic_block rec;
1621
1622          /* For recovery check instructions we calculate priority slightly
1623             differently than for normal instructions.  Instead of walking
1624             through the INSN_FORW_DEPS (check) list, we walk through the
1625             INSN_FORW_DEPS list of each instruction in the corresponding
1626             recovery block.  */
1627
1628          /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
1629          rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1630          if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
1631            {
1632              prev_first = PREV_INSN (insn);
1633              twin = insn;
1634            }
1635          else
1636            {
1637              prev_first = NEXT_INSN (BB_HEAD (rec));
1638              twin = PREV_INSN (BB_END (rec));
1639            }
1640
1641          do
1642            {
1643              sd_iterator_def sd_it;
1644              dep_t dep;
1645
1646              FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1647                {
1648                  rtx_insn *next;
1649                  int next_priority;
1650
1651                  next = DEP_CON (dep);
1652
1653                  if (BLOCK_FOR_INSN (next) != rec)
1654                    {
1655                      int cost;
1656
1657                      if (!contributes_to_priority_p (dep))
1658                        continue;
1659
1660                      if (twin == insn)
1661                        cost = dep_cost (dep);
1662                      else
1663                        {
1664                          struct _dep _dep1, *dep1 = &_dep1;
1665
1666                          init_dep (dep1, insn, next, REG_DEP_ANTI);
1667
1668                          cost = dep_cost (dep1);
1669                        }
1670
1671                      next_priority = cost + priority (next);
1672
1673                      if (next_priority > this_priority)
1674                        this_priority = next_priority;
1675                    }
1676                }
1677
1678              twin = PREV_INSN (twin);
1679            }
1680          while (twin != prev_first);
1681        }
1682
1683      if (this_priority < 0)
1684        {
1685          gcc_assert (this_priority == -1);
1686
1687          this_priority = insn_sched_cost (insn);
1688        }
1689
1690      INSN_PRIORITY (insn) = this_priority;
1691      INSN_PRIORITY_STATUS (insn) = 1;
1692    }
1693
1694  return INSN_PRIORITY (insn);
1695}
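
The recursion above is the classic critical-path computation: an insn's priority is the length of the longest cost-weighted path through its forward dependencies. A toy version over an explicit DAG (toy_node and its members are illustrative, not scheduler types):

#include <vector>
#include <utility>
#include <algorithm>

struct toy_node
{
  std::vector<std::pair<toy_node *, int>> forw_deps;  /* (consumer, edge cost) */
  int self_cost = 1;
  int prio = -1;                                      /* -1 = not computed yet */
};

/* Longest path to any leaf, analogous to priority () above.  */
static int toy_priority (toy_node *n)
{
  if (n->prio >= 0)
    return n->prio;             /* memoized, like INSN_PRIORITY_KNOWN */
  int best = n->self_cost;      /* leaves fall back to their own cost */
  for (auto &d : n->forw_deps)
    best = std::max (best, d.second + toy_priority (d.first));
  return n->prio = best;
}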
1696
1697/* Macros and functions for keeping the priority queue sorted, and
1698 dealing with queuing and dequeuing of instructions. */
1699
1700/* For each pressure class CL, set DEATH[CL] to the number of registers
1701 in that class that die in INSN. */
1702
1703static void
1704calculate_reg_deaths (rtx_insn *insn, int *death)
1705{
1706 int i;
1707 struct reg_use_data *use;
1708
1709  for (i = 0; i < ira_pressure_classes_num; i++)
1710    death[ira_pressure_classes[i]] = 0;
1711  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1712 if (dying_use_p (use))
1713 mark_regno_birth_or_death (0, death, use->regno, true);
1714}
1715
1716/* Setup info about the current register pressure impact of scheduling
1717 INSN at the current scheduling point. */
1718static void
1719setup_insn_reg_pressure_info (rtx_insn *insn)
1720{
1721 int i, change, before, after, hard_regno;
1722 int excess_cost_change;
1723 machine_mode mode;
1724 enum reg_class cl;
1725 struct reg_pressure_data *pressure_info;
1726 int *max_reg_pressure;
1727  static int death[N_REG_CLASSES];
1728
1729  gcc_checking_assert (!DEBUG_INSN_P (insn));
1730
1731  excess_cost_change = 0;
1732  calculate_reg_deaths (insn, death);
1733  pressure_info = INSN_REG_PRESSURE (insn);
1734  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1735  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1736  for (i = 0; i < ira_pressure_classes_num; i++)
1737    {
1738      cl = ira_pressure_classes[i];
1739      gcc_assert (curr_reg_pressure[cl] >= 0);
1740      change = (int) pressure_info[i].set_increase - death[cl];
1741      before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
1742      after = MAX (0, max_reg_pressure[i] + change
1743                   - sched_class_regs_num[cl]);
1744      hard_regno = ira_class_hard_regs[cl][0];
1745      gcc_assert (hard_regno >= 0);
1746      mode = reg_raw_mode[hard_regno];
1747      excess_cost_change += ((after - before)
1748                             * (ira_memory_move_cost[mode][cl][0]
1749                                + ira_memory_move_cost[mode][cl][1]));
1750    }
1751  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1752}
1753
1754/* This is the first page of code related to SCHED_PRESSURE_MODEL.
1755 It tries to make the scheduler take register pressure into account
1756 without introducing too many unnecessary stalls. It hooks into the
1757 main scheduling algorithm at several points:
1758
1759 - Before scheduling starts, model_start_schedule constructs a
1760 "model schedule" for the current block. This model schedule is
1761 chosen solely to keep register pressure down. It does not take the
1762 target's pipeline or the original instruction order into account,
1763 except as a tie-breaker. It also doesn't work to a particular
1764 pressure limit.
1765
1766 This model schedule gives us an idea of what pressure can be
1767 achieved for the block and gives us an example of a schedule that
1768 keeps to that pressure. It also makes the final schedule less
1769 dependent on the original instruction order. This is important
1770 because the original order can either be "wide" (many values live
1771 at once, such as in user-scheduled code) or "narrow" (few values
1772 live at once, such as after loop unrolling, where several
1773 iterations are executed sequentially).
1774
1775 We do not apply this model schedule to the rtx stream. We simply
1776 record it in model_schedule. We also compute the maximum pressure,
1777 MP, that was seen during this schedule.
1778
1779 - Instructions are added to the ready queue even if they require
1780 a stall. The length of the stall is instead computed as:
1781
1782 MAX (INSN_TICK (INSN) - clock_var, 0)
1783
1784 (= insn_delay). This allows rank_for_schedule to choose between
1785 introducing a deliberate stall or increasing pressure.
1786
1787 - Before sorting the ready queue, model_set_excess_costs assigns
1788 a pressure-based cost to each ready instruction in the queue.
1789 This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1790 (ECC for short) and is effectively measured in cycles.
1791
1792 - rank_for_schedule ranks instructions based on:
1793
1794 ECC (insn) + insn_delay (insn)
1795
1796 then as:
1797
1798 insn_delay (insn)
1799
1800 So, for example, an instruction X1 with an ECC of 1 that can issue
1801 now will win over an instruction X0 with an ECC of zero that would
1802 introduce a stall of one cycle. However, an instruction X2 with an
1803 ECC of 2 that can issue now will lose to both X0 and X1.
1804
1805 - When an instruction is scheduled, model_recompute updates the model
1806 schedule with the new pressures (some of which might now exceed the
1807 original maximum pressure MP). model_update_limit_points then searches
1808 for the new point of maximum pressure, if not already known. */
1809
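
To make the ranking rule above concrete, here is a stand-alone toy comparison (not scheduler code; the ECC and delay values for X0, X1 and X2 are taken from the example in the comment):

/* Toy restatement of the "ECC (insn) + insn_delay (insn), then
   insn_delay (insn)" ranking described above.  A lower rank wins.  */
#include <cstdio>

struct toy_insn { const char *name; int ecc; int delay; };

static int toy_rank (const toy_insn &a, const toy_insn &b)
{
  int d = (a.ecc + a.delay) - (b.ecc + b.delay);
  if (d != 0)
    return d;
  return a.delay - b.delay;
}

int main ()
{
  toy_insn x0 = { "X0", 0, 1 };   /* ECC 0, one-cycle stall.  */
  toy_insn x1 = { "X1", 1, 0 };   /* ECC 1, can issue now.  */
  toy_insn x2 = { "X2", 2, 0 };   /* ECC 2, can issue now.  */

  /* Prints -1 (X1 beats X0) and 1 (X2 loses to X0).  */
  printf ("%d %d\n", toy_rank (x1, x0), toy_rank (x2, x0));
  return 0;
}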
1810/* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1811 from surrounding debug information. */
1812#define MODEL_BAR \
1813  ";;\t\t+------------------------------------------------------\n"
1814
1815/* Information about the pressure on a particular register class at a
1816 particular point of the model schedule. */
1817struct model_pressure_data {
1818 /* The pressure at this point of the model schedule, or -1 if the
1819 point is associated with an instruction that has already been
1820 scheduled. */
1821 int ref_pressure;
1822
1823 /* The maximum pressure during or after this point of the model schedule. */
1824 int max_pressure;
1825};
1826
1827/* Per-instruction information that is used while building the model
1828 schedule. Here, "schedule" refers to the model schedule rather
1829 than the main schedule. */
1830struct model_insn_info {
1831 /* The instruction itself. */
1832 rtx_insn *insn;
1833
1834 /* If this instruction is in model_worklist, these fields link to the
1835 previous (higher-priority) and next (lower-priority) instructions
1836 in the list. */
1837 struct model_insn_info *prev;
1838 struct model_insn_info *next;
1839
1840 /* While constructing the schedule, QUEUE_INDEX describes whether an
1841 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1842 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1843 old_queue records the value that QUEUE_INDEX had before scheduling
1844 started, so that we can restore it once the schedule is complete. */
1845 int old_queue;
1846
1847 /* The relative importance of an unscheduled instruction. Higher
1848 values indicate greater importance. */
1849 unsigned int model_priority;
1850
1851 /* The length of the longest path of satisfied true dependencies
1852 that leads to this instruction. */
1853 unsigned int depth;
1854
1855 /* The length of the longest path of dependencies of any kind
1856 that leads from this instruction. */
1857 unsigned int alap;
1858
1859 /* The number of predecessor nodes that must still be scheduled. */
1860 int unscheduled_preds;
1861};
1862
1863/* Information about the pressure limit for a particular register class.
1864 This structure is used when applying a model schedule to the main
1865 schedule. */
1866struct model_pressure_limit {
1867 /* The maximum register pressure seen in the original model schedule. */
1868 int orig_pressure;
1869
1870 /* The maximum register pressure seen in the current model schedule
1871 (which excludes instructions that have already been scheduled). */
1872 int pressure;
1873
1874 /* The point of the current model schedule at which PRESSURE is first
1875 reached. It is set to -1 if the value needs to be recomputed. */
1876 int point;
1877};
1878
1879/* Describes a particular way of measuring register pressure. */
1880struct model_pressure_group {
1881 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1882  struct model_pressure_limit limits[N_REG_CLASSES];
1883
1884 /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
1885 on register class ira_pressure_classes[PCI] at point POINT of the
1886 current model schedule. A POINT of model_num_insns describes the
1887 pressure at the end of the schedule. */
1888 struct model_pressure_data *model;
1889};
1890
1891/* Index POINT gives the instruction at point POINT of the model schedule.
1892 This array doesn't change during main scheduling. */
1893static vec<rtx_insn *> model_schedule;
1894
1895/* The list of instructions in the model worklist, sorted in order of
1896 decreasing priority. */
1897static struct model_insn_info *model_worklist;
1898
1899/* Index I describes the instruction with INSN_LUID I. */
1900static struct model_insn_info *model_insns;
1901
1902/* The number of instructions in the model schedule. */
1903static int model_num_insns;
1904
1905/* The index of the first instruction in model_schedule that hasn't yet been
1906 added to the main schedule, or model_num_insns if all of them have. */
1907static int model_curr_point;
1908
1909/* Describes the pressure before each instruction in the model schedule. */
1910static struct model_pressure_group model_before_pressure;
1911
1912/* The first unused model_priority value (as used in model_insn_info). */
1913static unsigned int model_next_priority;
1914
1915
1916/* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1917 at point POINT of the model schedule. */
1918#define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1919  (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1920
1921/* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1922   after point POINT of the model schedule.  */
1923#define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1924  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1925
1926/* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1927   of the model schedule.  */
1928#define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1929  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1930
1931/* Information about INSN that is used when creating the model schedule.  */
1932#define MODEL_INSN_INFO(INSN) \
1933  (&model_insns[INSN_LUID (INSN)])
1934
1935/* The instruction at point POINT of the model schedule.  */
1936#define MODEL_INSN(POINT) \
1937  (model_schedule[POINT])
1938
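The model[] table behind these macros is one flat array: a row of ira_pressure_classes_num entries per schedule point, plus one extra row for the end of the block (POINT == model_num_insns). A minimal illustration of that layout, with made-up sizes:

/* Illustration only: 3 schedule points and 2 pressure classes.
   Rows 0..2 describe the points; row 3 is the end-of-schedule row.  */
const int num_points = 3, num_classes = 2;
static int model[(num_points + 1) * num_classes];

/* Same addressing scheme as MODEL_PRESSURE_DATA above.  */
static inline int &pressure_at (int point, int pci)
{
  return model[point * num_classes + pci];
}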
1939
1940/* Return INSN's index in the model schedule, or model_num_insns if it
1941 doesn't belong to that schedule. */
1942
1943static int
1944model_index (rtx_insn *insn)
1945{
1946  if (INSN_MODEL_INDEX (insn) == 0)
1947    return model_num_insns;
1948  return INSN_MODEL_INDEX (insn) - 1;
1949}
1950
1951/* Make sure that GROUP->limits is up-to-date for the current point
1952 of the model schedule. */
1953
1954static void
1955model_update_limit_points_in_group (struct model_pressure_group *group)
1956{
1957 int pci, max_pressure, point;
1958
1959  for (pci = 0; pci < ira_pressure_classes_num; pci++)
1960    {
1961      /* We may have passed the final point at which the pressure in
1962         group->limits[pci].pressure was reached.  Update the limit if so.  */
1963      max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1964      group->limits[pci].pressure = max_pressure;
1965
1966      /* Find the point at which MAX_PRESSURE is first reached.  We need
1967         to search in three cases:
1968
1969         - We've already moved past the previous pressure point.
1970           In this case we search forward from model_curr_point.
1971
1972         - We scheduled the previous point of maximum pressure ahead of
1973           its position in the model schedule, but doing so didn't bring
1974           the pressure point earlier.  In this case we search forward
1975           from that previous pressure point.
1976
1977         - Scheduling an instruction early caused the maximum pressure
1978           to decrease.  In this case we will have set the pressure
1979           point to -1, and we search forward from model_curr_point.  */
1980      point = MAX (group->limits[pci].point, model_curr_point);
1981      while (point < model_num_insns
1982             && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1983        point++;
1984      group->limits[pci].point = point;
1985
1986      gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
1987      gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
1988    }
1989}
1990
1991/* Make sure that all register-pressure limits are up-to-date for the
1992 current position in the model schedule. */
1993
1994static void
1995model_update_limit_points (void)
1996{
1997 model_update_limit_points_in_group (&model_before_pressure);
1998}
1999
2000/* Return the model_index of the last unscheduled use in chain USE
2001 outside of USE's instruction. Return -1 if there are no other uses,
2002 or model_num_insns if the register is live at the end of the block. */
2003
2004static int
2005model_last_use_except (struct reg_use_data *use)
2006{
2007 struct reg_use_data *next;
2008 int last, index;
2009
2010 last = -1;
2011 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2012    if (NONDEBUG_INSN_P (next->insn)
2013        && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2014 {
2015 index = model_index (next->insn);
2016 if (index == model_num_insns)
2017 return model_num_insns;
2018 if (last < index)
2019 last = index;
2020 }
2021 return last;
2022}
2023
2024/* An instruction with model_index POINT has just been scheduled, and it
2025 adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2026 Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2027 MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
2028
2029static void
2030model_start_update_pressure (struct model_pressure_group *group,
2031 int point, int pci, int delta)
2032{
2033 int next_max_pressure;
2034
2035 if (point == model_num_insns)
2036 {
2037 /* The instruction wasn't part of the model schedule; it was moved
2038 from a different block. Update the pressure for the end of
2039 the model schedule. */
2040      MODEL_REF_PRESSURE (group, point, pci) += delta;
2041      MODEL_MAX_PRESSURE (group, point, pci) += delta;
2042    }
2043  else
2044    {
2045      /* Record that this instruction has been scheduled.  Nothing now
2046         changes between POINT and POINT + 1, so get the maximum pressure
2047         from the latter.  If the maximum pressure decreases, the new
2048         pressure point may be before POINT.  */
2049      MODEL_REF_PRESSURE (group, point, pci) = -1;
2050      next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2051      if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2052        {
2053          MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2054          if (group->limits[pci].point == point)
2055            group->limits[pci].point = -1;
2056        }
2057    }
2058}
2059
2060/* Record that scheduling a later instruction has changed the pressure
2061 at point POINT of the model schedule by DELTA (which might be 0).
2062 Update GROUP accordingly. Return nonzero if these changes might
2063 trigger changes to previous points as well. */
2064
2065static int
2066model_update_pressure (struct model_pressure_group *group,
2067 int point, int pci, int delta)
2068{
2069 int ref_pressure, max_pressure, next_max_pressure;
2070
2071 /* If POINT hasn't yet been scheduled, update its pressure. */
2072  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2073  if (ref_pressure >= 0 && delta != 0)
2074    {
2075      ref_pressure += delta;
2076      MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2077
2078      /* Check whether the maximum pressure in the overall schedule
2079         has increased.  (This means that the MODEL_MAX_PRESSURE of
2080         every point <= POINT will need to increase too; see below.)  */
2081      if (group->limits[pci].pressure < ref_pressure)
2082        group->limits[pci].pressure = ref_pressure;
2083
2084      /* If we are at maximum pressure, and the maximum pressure
2085         point was previously unknown or later than POINT,
2086         bring it forward.  */
2087      if (group->limits[pci].pressure == ref_pressure
2088          && !IN_RANGE (group->limits[pci].point, 0, point))
2089        group->limits[pci].point = point;
2090
2091      /* If POINT used to be the point of maximum pressure, but isn't
2092         any longer, we need to recalculate it using a forward walk.  */
2093      if (group->limits[pci].pressure > ref_pressure
2094          && group->limits[pci].point == point)
2095        group->limits[pci].point = -1;
2096    }
2097
2098  /* Update the maximum pressure at POINT.  Changes here might also
2099     affect the maximum pressure at POINT - 1.  */
2100  next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2101  max_pressure = MAX (ref_pressure, next_max_pressure);
2102  if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2103    {
2104      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2105      return 1;
2106    }
2107  return 0;
2108}
2109
2110/* INSN has just been scheduled. Update the model schedule accordingly. */
2111
2112static void
2113model_recompute (rtx_insn *insn)
2114{
2115  struct {
2116    int last_use;
2117    int regno;
2118  } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2119  struct reg_use_data *use;
2120  struct reg_pressure_data *reg_pressure;
2121  int delta[N_REG_CLASSES];
2122  int pci, point, mix, new_last, cl, ref_pressure, queue;
2123  unsigned int i, num_uses, num_pending_births;
2124  bool print_p;
2125
2126  /* The destinations of INSN were previously live from POINT onwards, but are
2127     now live from model_curr_point onwards.  Set up DELTA accordingly.  */
2128  point = model_index (insn);
2129  reg_pressure = INSN_REG_PRESSURE (insn);
2130  for (pci = 0; pci < ira_pressure_classes_num; pci++)
2131    {
2132      cl = ira_pressure_classes[pci];
2133      delta[cl] = reg_pressure[pci].set_increase;
2134    }
2135
2136  /* Record which registers previously died at POINT, but which now die
2137     before POINT.  Adjust DELTA so that it represents the effect of
2138     this change after POINT - 1.  Set NUM_PENDING_BIRTHS to the number of
2139     registers that will be born in the range [model_curr_point, POINT).  */
2140  num_uses = 0;
2141  num_pending_births = 0;
2142  bitmap_clear (tmp_bitmap);
2143  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2144    {
2145      new_last = model_last_use_except (use);
2146      if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno))
2147        {
2148          gcc_assert (num_uses < ARRAY_SIZE (uses));
2149          uses[num_uses].last_use = new_last;
2150          uses[num_uses].regno = use->regno;
2151          /* This register is no longer live after POINT - 1.  */
2152          mark_regno_birth_or_death (NULL, delta, use->regno, false);
2153          num_uses++;
2154          if (new_last >= 0)
2155            num_pending_births++;
2156        }
2157    }
2158
2159  /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2160     Also set each group pressure limit for POINT.  */
2161  for (pci = 0; pci < ira_pressure_classes_num; pci++)
2162    {
2163      cl = ira_pressure_classes[pci];
2164      model_start_update_pressure (&model_before_pressure,
2165                                   point, pci, delta[cl]);
2166    }
2167
2168  /* Walk the model schedule backwards, starting immediately before POINT.  */
2169  print_p = false;
2170  if (point != model_curr_point)
2171    do
2172      {
2173        point--;
2174        insn = MODEL_INSN (point);
2175        queue = QUEUE_INDEX (insn);
2176
2177        if (queue != QUEUE_SCHEDULED)
2178          {
2179            /* DELTA describes the effect of the move on the register pressure
2180               after POINT.  Make it describe the effect on the pressure
2181               before POINT.  */
2182            i = 0;
2183            while (i < num_uses)
2184              {
2185                if (uses[i].last_use == point)
2186                  {
2187                    /* This register is now live again.  */
2188                    mark_regno_birth_or_death (NULL, delta,
2189                                               uses[i].regno, true);
2190
2191                    /* Remove this use from the array.  */
2192                    uses[i] = uses[num_uses - 1];
2193                    num_uses--;
2194                    num_pending_births--;
2195                  }
2196                else
2197                  i++;
2198              }
2199
2200            if (sched_verbose >= 5)
2201              {
2202                if (!print_p)
2203                  {
2204                    fprintf (sched_dump, MODEL_BAR);
2205                    fprintf (sched_dump, ";;\t\t| New pressure for model"
2206                             " schedule\n");
2207                    fprintf (sched_dump, MODEL_BAR);
2208                    print_p = true;
2209                  }
2210
2211                fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2212                         point, INSN_UID (insn),
2213                         str_pattern_slim (PATTERN (insn)));
2214                for (pci = 0; pci < ira_pressure_classes_num; pci++)
2215                  {
2216                    cl = ira_pressure_classes[pci];
2217                    ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2218                                                       point, pci);
2219                    fprintf (sched_dump, " %s:[%d->%d]",
2220                             reg_class_names[ira_pressure_classes[pci]],
2221                             ref_pressure, ref_pressure + delta[cl]);
2222                  }
2223                fprintf (sched_dump, "\n");
2224              }
2225          }
2226
2227        /* Adjust the pressure at POINT.  Set MIX to nonzero if POINT - 1
2228           might have changed as well.  */
2229        mix = num_pending_births;
2230        for (pci = 0; pci < ira_pressure_classes_num; pci++)
2231          {
2232            cl = ira_pressure_classes[pci];
2233            mix |= delta[cl];
2234            mix |= model_update_pressure (&model_before_pressure,
2235                                          point, pci, delta[cl]);
2236          }
2237      }
2238    while (mix && point > model_curr_point);
2239
2240  if (print_p)
2241    fprintf (sched_dump, MODEL_BAR);
2242}
2243
2244/* After DEP, which was cancelled, has been resolved for insn NEXT,
2245 check whether the insn's pattern needs restoring. */
2246static bool
2247must_restore_pattern_p (rtx_insn *next, dep_t dep)
2248{
2249  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2250    return false;
2251
2252  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2253    {
2254      gcc_assert (ORIG_PAT (next) != NULL_RTX);
2255      gcc_assert (next == DEP_CON (dep));
2256    }
2257  else
2258    {
2259      struct dep_replacement *desc = DEP_REPLACE (dep);
2260      if (desc->insn != next)
2261        {
2262          gcc_assert (*desc->loc == desc->orig);
2263          return false;
2264        }
2265    }
2266  return true;
2267}
2268
2269/* model_spill_cost (CL, P, P') returns the cost of increasing the
2270 pressure on CL from P to P'. We use this to calculate a "base ECC",
2271 baseECC (CL, X), for each pressure class CL and each instruction X.
2272 Supposing X changes the pressure on CL from P to P', and that the
2273 maximum pressure on CL in the current model schedule is MP', then:
2274
2275 * if X occurs before or at the next point of maximum pressure in
2276 the model schedule and P' > MP', then:
2277
2278 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2279
2280 The idea is that the pressure after scheduling a fixed set of
2281 instructions -- in this case, the set up to and including the
2282 next maximum pressure point -- is going to be the same regardless
2283 of the order; we simply want to keep the intermediate pressure
2284 under control. Thus X has a cost of zero unless scheduling it
2285 now would exceed MP'.
2286
2287 If all increases in the set are by the same amount, no zero-cost
2288 instruction will ever cause the pressure to exceed MP'. However,
2289 if X is instead moved past an instruction X' with pressure in the
2290 range (MP' - (P' - P), MP'), the pressure at X' will increase
2291 beyond MP'. Since baseECC is very much a heuristic anyway,
2292 it doesn't seem worth the overhead of tracking cases like these.
2293
2294 The cost of exceeding MP' is always based on the original maximum
2295 pressure MP. This is so that going 2 registers over the original
2296 limit has the same cost regardless of whether it comes from two
2297 separate +1 deltas or from a single +2 delta.
2298
2299 * if X occurs after the next point of maximum pressure in the model
2300 schedule and P' > P, then:
2301
2302 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2303
2304 That is, if we move X forward across a point of maximum pressure,
2305 and if X increases the pressure by P' - P, then we conservatively
2306 assume that scheduling X next would increase the maximum pressure
2307 by P' - P. Again, the cost of doing this is based on the original
2308 maximum pressure MP, for the same reason as above.
2309
2310 * if P' < P, P > MP, and X occurs at or after the next point of
2311 maximum pressure, then:
2312
2313 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2314
2315 That is, if we have already exceeded the original maximum pressure MP,
2316 and if X might reduce the maximum pressure again -- or at least push
2317 it further back, and thus allow more scheduling freedom -- it is given
2318 a negative cost to reflect the improvement.
2319
2320 * otherwise,
2321
2322 baseECC (CL, X) = 0
2323
2324 In this case, X is not expected to affect the maximum pressure MP',
2325 so it has zero cost.
2326
2327 We then create a combined value baseECC (X) that is the sum of
2328 baseECC (CL, X) for each pressure class CL.
2329
2330 baseECC (X) could itself be used as the ECC value described above.
2331 However, this is often too conservative, in the sense that it
2332 tends to make high-priority instructions that increase pressure
2333 wait too long in cases where introducing a spill would be better.
2334 For this reason the final ECC is a priority-adjusted form of
2335 baseECC (X). Specifically, we calculate:
2336
2337 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2338 baseP = MAX { P (X) | baseECC (X) <= 0 }
2339
2340 Then:
2341
2342 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2343
2344 Thus an instruction's effect on pressure is ignored if it has a high
2345 enough priority relative to the ones that don't increase pressure.
2346 Negative values of baseECC (X) do not increase the priority of X
2347 itself, but they do make it harder for other instructions to
2348 increase the pressure further.
2349
2350 This pressure cost is deliberately timid. The intention has been
2351 to choose a heuristic that rarely interferes with the normal list
2352 scheduler in cases where that scheduler would produce good code.
2353 We simply want to curb some of its worst excesses. */
2354
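Restating the final clamped formula from the comment above as a tiny helper (arithmetic only; in the code below this computation is spread across model_excess_cost and model_set_excess_costs):

/* ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0), with
   P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X).  */
static int toy_ecc (int insn_priority, int delay, int base_ecc, int base_p)
{
  int p = insn_priority - delay - base_ecc;
  int ecc = base_p - p < base_ecc ? base_p - p : base_ecc;
  return ecc > 0 ? ecc : 0;
}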
2355/* Return the cost of increasing the pressure in class CL from FROM to TO.
2356
2357 Here we use the very simplistic cost model that every register above
2358 sched_class_regs_num[CL] has a spill cost of 1. We could use other
2359 measures instead, such as one based on MEMORY_MOVE_COST. However:
2360
2361 (1) In order for an instruction to be scheduled, the higher cost
2362 would need to be justified in a single saving of that many stalls.
2363 This is overly pessimistic, because the benefit of spilling is
2364 often to avoid a sequence of several short stalls rather than
2365 a single long one.
2366
2367 (2) The cost is still arbitrary. Because we are not allocating
2368 registers during scheduling, we have no way of knowing for
2369 sure how many memory accesses will be required by each spill,
2370 where the spills will be placed within the block, or even
2371 which block(s) will contain the spills.
2372
2373 So a higher cost than 1 is often too conservative in practice,
2374 forcing blocks to contain unnecessary stalls instead of spill code.
2375 The simple cost below seems to be the best compromise. It reduces
2376 the interference with the normal list scheduler, which helps make
2377 it more suitable for a default-on option. */
2378
2379static int
2380model_spill_cost (int cl, int from, int to)
2381{
2382  from = MAX (from, sched_class_regs_num[cl]);
2383  return MAX (to, from) - from;
2384}
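
As a quick sanity check of the cost model, the same arithmetic restated stand-alone, with the class limit passed in explicitly instead of read from sched_class_regs_num (the value 8 below is made up):

/* Illustrative only: mirrors model_spill_cost's arithmetic.  */
static int toy_spill_cost (int limit, int from, int to)
{
  from = from > limit ? from : limit;
  return (to > from ? to : from) - from;
}

/* With a limit of 8 registers:
   toy_spill_cost (8, 6, 10) == 2  -- going from 6 to 10 live values
                                      spills the 2 above the limit;
   toy_spill_cost (8, 9, 11) == 2  -- only the increase beyond the
                                      current excess is charged;
   toy_spill_cost (8, 6, 7)  == 0  -- still within the class limit.  */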
2385
2386/* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2387 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2388 P' = P + DELTA. */
2389
2390static int
2391model_excess_group_cost (struct model_pressure_group *group,
2392 int point, int pci, int delta)
2393{
2394 int pressure, cl;
2395
2396  cl = ira_pressure_classes[pci];
2397  if (delta < 0 && point >= group->limits[pci].point)
2398    {
2399      pressure = MAX (group->limits[pci].orig_pressure,
2400                      curr_reg_pressure[cl] + delta);
2401 return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2402 }
2403
2404 if (delta > 0)
2405 {
2406 if (point > group->limits[pci].point)
2407 pressure = group->limits[pci].pressure + delta;
2408 else
2409 pressure = curr_reg_pressure[cl] + delta;
2410
2411 if (pressure > group->limits[pci].pressure)
2412 return model_spill_cost (cl, group->limits[pci].orig_pressure,
2413 pressure);
2414 }
2415
2416 return 0;
2417}
2418
2419/* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
2420 if PRINT_P. */
2421
2422static int
2423model_excess_cost (rtx_insn *insn, bool print_p)
2424{
2425 int point, pci, cl, cost, this_cost, delta;
2426 struct reg_pressure_data *insn_reg_pressure;
2427  int insn_death[N_REG_CLASSES];
2428
2429  calculate_reg_deaths (insn, insn_death);
2430  point = model_index (insn);
2431  insn_reg_pressure = INSN_REG_PRESSURE (insn);
2432  cost = 0;
2433
2434  if (print_p)
2435    fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2436             INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2437
2438  /* Sum up the individual costs for each register class.  */
2439  for (pci = 0; pci < ira_pressure_classes_num; pci++)
2440    {
2441      cl = ira_pressure_classes[pci];
2442 delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2443 this_cost = model_excess_group_cost (&model_before_pressure,
2444 point, pci, delta);
2445 cost += this_cost;
2446 if (print_p)
2447 fprintf (sched_dump, " %s:[%d base cost %d]",
2448 reg_class_names[cl], delta, this_cost);
2449 }
2450
2451 if (print_p)
2452 fprintf (sched_dump, "\n");
2453
2454 return cost;
2455}
2456
2457/* Dump the next points of maximum pressure for GROUP. */
2458
2459static void
2460model_dump_pressure_points (struct model_pressure_group *group)
2461{
2462 int pci, cl;
2463
2464 fprintf (sched_dump, ";;\t\t| pressure points");
2465  for (pci = 0; pci < ira_pressure_classes_num; pci++)
2466    {
2467      cl = ira_pressure_classes[pci];
2468      fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2469               curr_reg_pressure[cl], group->limits[pci].pressure);
2470      if (group->limits[pci].point < model_num_insns)
2471        fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2472                 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2473 else
2474 fprintf (sched_dump, "end]");
2475 }
2476 fprintf (sched_dump, "\n");
2477}
2478
2479/* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2480
2481static void
2482model_set_excess_costs (rtx_insn **insns, int count)
2483{
2484 int i, cost, priority_base, priority;
2485 bool print_p;
2486
2487 /* Record the baseECC value for each instruction in the model schedule,
2488 except that negative costs are converted to zero ones now rather than
2489 later. Do not assign a cost to debug instructions, since they must
2490 not change code-generation decisions. Experiments suggest we also
2491 get better results by not assigning a cost to instructions from
2492 a different block.
2493
2494 Set PRIORITY_BASE to baseP in the block comment above. This is the
2495 maximum priority of the "cheap" instructions, which should always
2496 include the next model instruction. */
2497  priority_base = 0;
2498  print_p = false;
2499  for (i = 0; i < count; i++)
2500    if (INSN_MODEL_INDEX (insns[i]))
2501      {
2502        if (sched_verbose >= 6 && !print_p)
2503          {
2504            fprintf (sched_dump, MODEL_BAR);
2505            fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2506            model_dump_pressure_points (&model_before_pressure);
2507            fprintf (sched_dump, MODEL_BAR);
2508            print_p = true;
2509          }
2510        cost = model_excess_cost (insns[i], print_p);
2511        if (cost <= 0)
2512          {
2513            priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2514            priority_base = MAX (priority_base, priority);
2515            cost = 0;
2516          }
2517        INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2518      }
2519  if (print_p)
2520    fprintf (sched_dump, MODEL_BAR);
2521
2522  /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2523     instruction.  */
2524  for (i = 0; i < count; i++)
2525    {
2526      cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2527      priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2528      if (cost > 0 && priority > priority_base)
2529        {
2530          cost += priority_base - priority;
2531          INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2532        }
2533    }
2534}
2535
2536
2537/* Enum of rank_for_schedule heuristic decisions. */
2538enum rfs_decision {
2539 RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2540 RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2541 RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_AUTOPREF, RFS_SPECULATION,
2542 RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2543 RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N };
2544
2545/* Corresponding strings for print outs. */
2546static const char *rfs_str[RFS_N] = {
2547 "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2548 "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2549 "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_AUTOPREF", "RFS_SPECULATION",
2550 "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2551 "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" };
2552
2553/* Statistical breakdown of rank_for_schedule decisions. */
2554struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
2555static rank_for_schedule_stats_t rank_for_schedule_stats;
2556
2557/* Return the result of comparing insns TMP and TMP2 and update
2558 Rank_For_Schedule statistics. */
2559static int
2560rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
2561{
2562 ++rank_for_schedule_stats.stats[decision];
2563 if (result < 0)
2564    INSN_LAST_RFS_WIN (tmp) = decision;
2565  else if (result > 0)
2566    INSN_LAST_RFS_WIN (tmp2) = decision;
2567  else
2568    gcc_unreachable ();
2569 return result;
2570}
2571
2572/* Sorting predicate to move DEBUG_INSNs to the top of ready list, while
2573 keeping normal insns in original order. */
2574
2575static int
2576rank_for_schedule_debug (const void *x, const void *y)
2577{
2578 rtx_insn *tmp = *(rtx_insn * const *) y;
2579 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2580
2581 /* Schedule debug insns as early as possible. */
2582  if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2583    return -1;
2584  else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2585    return 1;
2586  else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2587    return INSN_LUID (tmp) - INSN_LUID (tmp2);
2588  else
2589    return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
2590}
2591
2592/* Returns a positive value if x is preferred; returns a negative value if
2593 y is preferred. Should never return 0, since that will make the sort
2594 unstable. */
2595
2596static int
2597rank_for_schedule (const void *x, const void *y)
2598{
2599 rtx_insn *tmp = *(rtx_insn * const *) y;
2600 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2601 int tmp_class, tmp2_class;
2602 int val, priority_val, info_val, diff;
2603
2604 if (live_range_shrinkage_p)
2605 {
2606 /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2607 code. */
2608      gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2609      if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2610           || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2611          && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2612                      - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2613        return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
2614      /* Sort by INSN_LUID (original insn order), so that we make the
2615         sort stable.  This minimizes instruction movement, thus
2616         minimizing sched's effect on debugging and cross-jumping.  */
2617      return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2618                         INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2619 }
2620
2621 /* The insn in a schedule group should be issued first. */
2622 if (flag_sched_group_heuristicglobal_options.x_flag_sched_group_heuristic &&
2623 SCHED_GROUP_P (tmp)(__extension__ ({ __typeof ((tmp)) const _rtx = ((tmp)); if (
((enum rtx_code) (_rtx)->code) != DEBUG_INSN && ((
enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != JUMP_INSN && ((enum rtx_code) (
_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SCHED_GROUP_P"
, _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2623, __FUNCTION__); _rtx; })->in_struct)
!= SCHED_GROUP_P (tmp2)(__extension__ ({ __typeof ((tmp2)) const _rtx = ((tmp2)); if
(((enum rtx_code) (_rtx)->code) != DEBUG_INSN && (
(enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != JUMP_INSN && ((enum rtx_code) (
_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SCHED_GROUP_P"
, _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2623, __FUNCTION__); _rtx; })->in_struct)
)
2624 return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2)(__extension__ ({ __typeof ((tmp2)) const _rtx = ((tmp2)); if
(((enum rtx_code) (_rtx)->code) != DEBUG_INSN && (
(enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != JUMP_INSN && ((enum rtx_code) (
_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SCHED_GROUP_P"
, _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2624, __FUNCTION__); _rtx; })->in_struct)
? 1 : -1,
2625 tmp, tmp2);
2626
2627 /* Make sure that the priorities of TMP and TMP2 are initialized. */
2628 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2))((void)(!((((&h_i_d[INSN_UID (tmp)])->priority_status)
> 0) && (((&h_i_d[INSN_UID (tmp2)])->priority_status
) > 0)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2628, __FUNCTION__), 0 : 0))
;
2629
2630 if (sched_fusion)
2631 {
2632 /* The instruction that has the same fusion priority as the last
2633 scheduled instruction is the one we pick next. If there is no
2634 such instruction, we sort the ready list first by fusion priority,
2635 then by priority, and finally by INSN_LUID. */
2636 int a = INSN_FUSION_PRIORITY (tmp)((&h_i_d[INSN_UID (tmp)])->fusion_priority);
2637 int b = INSN_FUSION_PRIORITY (tmp2)((&h_i_d[INSN_UID (tmp2)])->fusion_priority);
2638 int last = -1;
2639
2640 if (last_nondebug_scheduled_insn
2641 && !NOTE_P (last_nondebug_scheduled_insn)(((enum rtx_code) (last_nondebug_scheduled_insn)->code) ==
NOTE)
2642 && BLOCK_FOR_INSN (tmp)
2643 == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
2644 last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn)((&h_i_d[INSN_UID (last_nondebug_scheduled_insn)])->fusion_priority
)
;
2645
2646 if (a != last && b != last)
2647 {
2648 if (a == b)
2649 {
2650 a = INSN_PRIORITY (tmp)((&h_i_d[INSN_UID (tmp)])->priority);
2651 b = INSN_PRIORITY (tmp2)((&h_i_d[INSN_UID (tmp2)])->priority);
2652 }
2653 if (a != b)
2654 return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
2655 else
2656 return rfs_result (RFS_FUSION,
2657 INSN_LUID (tmp)(sched_luids[INSN_UID (tmp)]) - INSN_LUID (tmp2)(sched_luids[INSN_UID (tmp2)]), tmp, tmp2);
2658 }
2659 else if (a == b)
2660 {
2661 gcc_assert (last_nondebug_scheduled_insn((void)(!(last_nondebug_scheduled_insn && !(((enum rtx_code
) (last_nondebug_scheduled_insn)->code) == NOTE)) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2662, __FUNCTION__), 0 : 0))
2662 && !NOTE_P (last_nondebug_scheduled_insn))((void)(!(last_nondebug_scheduled_insn && !(((enum rtx_code
) (last_nondebug_scheduled_insn)->code) == NOTE)) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2662, __FUNCTION__), 0 : 0))
;
2663 last = INSN_PRIORITY (last_nondebug_scheduled_insn)((&h_i_d[INSN_UID (last_nondebug_scheduled_insn)])->priority
)
;
2664
2665 a = abs (INSN_PRIORITY (tmp)((&h_i_d[INSN_UID (tmp)])->priority) - last);
2666 b = abs (INSN_PRIORITY (tmp2)((&h_i_d[INSN_UID (tmp2)])->priority) - last);
2667 if (a != b)
2668 return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
2669 else
2670 return rfs_result (RFS_FUSION,
2671 INSN_LUID (tmp)(sched_luids[INSN_UID (tmp)]) - INSN_LUID (tmp2)(sched_luids[INSN_UID (tmp2)]), tmp, tmp2);
2672 }
2673 else if (a == last)
2674 return rfs_result (RFS_FUSION, -1, tmp, tmp2);
2675 else
2676 return rfs_result (RFS_FUSION, 1, tmp, tmp2);
2677 }
2678
2679 if (sched_pressure != SCHED_PRESSURE_NONE)
2680 {
2681 /* Prefer insn whose scheduling results in the smallest register
2682 pressure excess. */
2683 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)((&h_i_d[INSN_UID (tmp)])->reg_pressure_excess_cost_change
)
2684 + insn_delay (tmp)
2685 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)((&h_i_d[INSN_UID (tmp2)])->reg_pressure_excess_cost_change
)
2686 - insn_delay (tmp2))))
2687 return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
2688 }
2689
2690 if (sched_pressure != SCHED_PRESSURE_NONE
2691 && (INSN_TICK (tmp2)((&h_i_d[INSN_UID (tmp2)])->tick) > clock_var || INSN_TICK (tmp)((&h_i_d[INSN_UID (tmp)])->tick) > clock_var)
2692 && INSN_TICK (tmp2)((&h_i_d[INSN_UID (tmp2)])->tick) != INSN_TICK (tmp)((&h_i_d[INSN_UID (tmp)])->tick))
2693 {
2694 diff = INSN_TICK (tmp)((&h_i_d[INSN_UID (tmp)])->tick) - INSN_TICK (tmp2)((&h_i_d[INSN_UID (tmp2)])->tick);
2695 return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
2696 }
2697
2698 /* If we are doing backtracking in this schedule, prefer insns that
2699 have forward dependencies with negative cost against an insn that
2700 was already scheduled. */
2701 if (current_sched_info->flags & DO_BACKTRACKING)
2702 {
2703 priority_val = FEEDS_BACKTRACK_INSN (tmp2)((&h_i_d[INSN_UID (tmp2)])->feeds_backtrack_insn) - FEEDS_BACKTRACK_INSN (tmp)((&h_i_d[INSN_UID (tmp)])->feeds_backtrack_insn);
2704 if (priority_val)
2705 return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
2706 }
2707
2708 /* Prefer insn with higher priority. */
2709 priority_val = INSN_PRIORITY (tmp2)((&h_i_d[INSN_UID (tmp2)])->priority) - INSN_PRIORITY (tmp)((&h_i_d[INSN_UID (tmp)])->priority);
2710
2711 if (flag_sched_critical_path_heuristicglobal_options.x_flag_sched_critical_path_heuristic && priority_val)
2712 return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
2713
2714 if (param_sched_autopref_queue_depthglobal_options.x_param_sched_autopref_queue_depth >= 0)
2715 {
2716 int autopref = autopref_rank_for_schedule (tmp, tmp2);
2717 if (autopref != 0)
2718 return rfs_result (RFS_AUTOPREF, autopref, tmp, tmp2);
2719 }
2720
2721 /* Prefer the speculative insn with the greater dependency weakness. */
2722 if (flag_sched_spec_insn_heuristicglobal_options.x_flag_sched_spec_insn_heuristic && spec_info)
2723 {
2724 ds_t ds1, ds2;
2725 dw_t dw1, dw2;
2726 int dw;
2727
2728 ds1 = TODO_SPEC (tmp)((&h_i_d[INSN_UID (tmp)])->todo_spec) & SPECULATIVE(((((ds_t) ((1 << (((8 * 4) - 8) / 4)) - 1)) << BEGIN_DATA_BITS_OFFSET
) | (((ds_t) ((1 << (((8 * 4) - 8) / 4)) - 1)) <<
BE_IN_DATA_BITS_OFFSET)) | ((((ds_t) ((1 << (((8 * 4) -
8) / 4)) - 1)) << BEGIN_CONTROL_BITS_OFFSET) | (((ds_t
) ((1 << (((8 * 4) - 8) / 4)) - 1)) << BE_IN_CONTROL_BITS_OFFSET
)))
;
2729 if (ds1)
2730 dw1 = ds_weak (ds1);
2731 else
2732 dw1 = NO_DEP_WEAK((((1 << (((8 * 4) - 8) / 4)) - 1) - 1) + 1);
2733
2734 ds2 = TODO_SPEC (tmp2)((&h_i_d[INSN_UID (tmp2)])->todo_spec) & SPECULATIVE(((((ds_t) ((1 << (((8 * 4) - 8) / 4)) - 1)) << BEGIN_DATA_BITS_OFFSET
) | (((ds_t) ((1 << (((8 * 4) - 8) / 4)) - 1)) <<
BE_IN_DATA_BITS_OFFSET)) | ((((ds_t) ((1 << (((8 * 4) -
8) / 4)) - 1)) << BEGIN_CONTROL_BITS_OFFSET) | (((ds_t
) ((1 << (((8 * 4) - 8) / 4)) - 1)) << BE_IN_CONTROL_BITS_OFFSET
)))
;
2735 if (ds2)
2736 dw2 = ds_weak (ds2);
2737 else
2738 dw2 = NO_DEP_WEAK((((1 << (((8 * 4) - 8) / 4)) - 1) - 1) + 1);
2739
2740 dw = dw2 - dw1;
2741 if (dw > (NO_DEP_WEAK((((1 << (((8 * 4) - 8) / 4)) - 1) - 1) + 1) / 8) || dw < -(NO_DEP_WEAK((((1 << (((8 * 4) - 8) / 4)) - 1) - 1) + 1) / 8))
2742 return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
2743 }
2744
2745 info_val = (*current_sched_info->rank) (tmp, tmp2);
2746 if (flag_sched_rank_heuristicglobal_options.x_flag_sched_rank_heuristic && info_val)
2747 return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);
2748
2749 /* Compare insns based on their relation to the last scheduled
2750 non-debug insn. */
2751 if (flag_sched_last_insn_heuristicglobal_options.x_flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2752 {
2753 dep_t dep1;
2754 dep_t dep2;
2755 rtx_insn *last = last_nondebug_scheduled_insn;
2756
2757 /* Classify the instructions into three classes:
2758 1) Data dependent on last scheduled insn.
2759 2) Anti/Output dependent on last scheduled insn.
2760 3) Independent of last scheduled insn, or has latency of one.
2761 Choose the insn from the highest-numbered class if they differ. */
2762 dep1 = sd_find_dep_between (last, tmp, true);
2763
2764 if (dep1 == NULLnullptr || dep_cost (dep1) == 1)
2765 tmp_class = 3;
2766 else if (/* Data dependence. */
2767 DEP_TYPE (dep1)((dep1)->type) == REG_DEP_TRUE)
2768 tmp_class = 1;
2769 else
2770 tmp_class = 2;
2771
2772 dep2 = sd_find_dep_between (last, tmp2, true);
2773
2774 if (dep2 == NULLnullptr || dep_cost (dep2) == 1)
2775 tmp2_class = 3;
2776 else if (/* Data dependence. */
2777 DEP_TYPE (dep2)((dep2)->type) == REG_DEP_TRUE)
2778 tmp2_class = 1;
2779 else
2780 tmp2_class = 2;
2781
2782 if ((val = tmp2_class - tmp_class))
2783 return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
2784 }
2785
2786 /* Prefer instructions that occur earlier in the model schedule. */
2787 if (sched_pressure == SCHED_PRESSURE_MODEL)
2788 {
2789 diff = model_index (tmp) - model_index (tmp2);
2790 if (diff != 0)
2791 return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
2792 }
2793
2794 /* Prefer the insn which has more later insns that depend on it.
2795 This gives the scheduler more freedom when scheduling later
2796 instructions at the expense of added register pressure. */
2797
2798 val = (dep_list_size (tmp2, SD_LIST_FORW(4))
2799 - dep_list_size (tmp, SD_LIST_FORW(4)));
2800
2801 if (flag_sched_dep_count_heuristicglobal_options.x_flag_sched_dep_count_heuristic && val != 0)
2802 return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
2803
2804 /* Sort by INSN_COST rather than INSN_LUID. This means that instructions
2805 which take longer to execute are prioritised, which leads to more
2806 dual-issue opportunities on in-order cores that have this feature. */
2807
2808 if (INSN_COST (tmp)((&h_i_d[INSN_UID (tmp)])->cost) != INSN_COST (tmp2)((&h_i_d[INSN_UID (tmp2)])->cost))
2809 return rfs_result (RFS_COST, INSN_COST (tmp2)((&h_i_d[INSN_UID (tmp2)])->cost) - INSN_COST (tmp)((&h_i_d[INSN_UID (tmp)])->cost),
2810 tmp, tmp2);
2811
2812 /* If insns are equally good, sort by INSN_LUID (original insn order),
2813 so that we make the sort stable. This minimizes instruction movement,
2814 thus minimizing sched's effect on debugging and cross-jumping. */
2815 return rfs_result (RFS_TIE, INSN_LUID (tmp)(sched_luids[INSN_UID (tmp)]) - INSN_LUID (tmp2)(sched_luids[INSN_UID (tmp2)]), tmp, tmp2);
2816}
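
The comparator above is a chain of heuristics: each test either decides the
order or defers to the next one, with INSN_LUID as the final tie-break so the
sort is stable and the function never returns 0. A minimal self-contained
sketch of that shape (illustrative fields and heuristics, not GCC's; note
that rank_for_schedule additionally swaps X and Y because the ready vector
keeps its highest-priority insn at the end):

#include <cstdio>
#include <cstdlib>

struct insn { int priority; int dep_count; int luid; };

/* Try heuristics in order; fall back to the original order (luid) so
   the sort is stable and the comparator never returns 0.  */
static int rank (const void *x, const void *y)
{
  const insn *a = *(const insn *const *) x;
  const insn *b = *(const insn *const *) y;

  if (int d = b->priority - a->priority)    /* higher priority first */
    return d;
  if (int d = b->dep_count - a->dep_count)  /* more forward deps first */
    return d;
  return a->luid - b->luid;                 /* earlier original order first */
}

int main ()
{
  insn i1 = {5, 2, 1}, i2 = {5, 2, 2}, i3 = {7, 0, 3};
  const insn *ready[] = {&i1, &i2, &i3};
  qsort (ready, 3, sizeof (insn *), rank);
  for (const insn *p : ready)
    printf ("luid %d\n", p->luid);          /* prints 3, then 1, then 2 */
}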
2817
2818/* Resort the array A of length N, in which only the last element may be out of order. */
2819
2820HAIFA_INLINE__inline static void
2821swap_sort (rtx_insn **a, int n)
2822{
2823 rtx_insn *insn = a[n - 1];
2824 int i = n - 2;
2825
2826 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2827 {
2828 a[i + 1] = a[i];
2829 i -= 1;
2830 }
2831 a[i + 1] = insn;
2832}
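
swap_sort is a single insertion-sort pass: only the last element can be
misplaced, so it is slid down to its position in O(n). The same step on a
plain int array (a standalone sketch, not GCC code; the comparison plays
the role of rank_for_schedule () >= 0):

#include <cstdio>

static void swap_sort_ints (int *a, int n)
{
  int v = a[n - 1];     /* the only possibly misplaced element */
  int i = n - 2;

  while (i >= 0 && a[i] > v)
    {
      a[i + 1] = a[i];  /* shift greater elements up by one */
      i -= 1;
    }
  a[i + 1] = v;
}

int main ()
{
  int a[] = {1, 3, 5, 7, 4};
  swap_sort_ints (a, 5);
  for (int x : a)
    printf ("%d ", x);  /* prints 1 3 4 5 7 */
  printf ("\n");
}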
2833
2834/* Add INSN to the insn queue so that it can be executed at least
2835 N_CYCLES after the currently executing insn. Preserve the insn
2836 chain for debugging purposes. REASON will be printed in debugging
2837 output. */
2838
2839HAIFA_INLINE__inline static void
2840queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2841{
2842 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles)(((q_ptr)+n_cycles) & max_insn_queue_index);
2843 rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2844 int new_tick;
2845
2846 gcc_assert (n_cycles <= max_insn_queue_index)((void)(!(n_cycles <= max_insn_queue_index) ? fancy_abort (
"/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2846, __FUNCTION__), 0 : 0))
;
2847 gcc_assert (!DEBUG_INSN_P (insn))((void)(!(!(((enum rtx_code) (insn)->code) == DEBUG_INSN))
? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2847, __FUNCTION__), 0 : 0))
;
2848
2849 insn_queue[next_q] = link;
2850 q_size += 1;
2851
2852 if (sched_verbose >= 2)
2853 {
2854 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2855 (*current_sched_info->print_insn) (insn, 0));
2856
2857 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2858 }
2859
2860 QUEUE_INDEX (insn)((&h_i_d[INSN_UID (insn)])->queue_index) = next_q;
2861
2862 if (current_sched_info->flags & DO_BACKTRACKING)
2863 {
2864 new_tick = clock_var + n_cycles;
2865 if (INSN_TICK (insn)((&h_i_d[INSN_UID (insn)])->tick) == INVALID_TICK(-(max_insn_queue_index + 1)) || INSN_TICK (insn)((&h_i_d[INSN_UID (insn)])->tick) < new_tick)
2866 INSN_TICK (insn)((&h_i_d[INSN_UID (insn)])->tick) = new_tick;
2867
2868 if (INSN_EXACT_TICK (insn)((&h_i_d[INSN_UID (insn)])->exact_tick) != INVALID_TICK(-(max_insn_queue_index + 1))
2869 && INSN_EXACT_TICK (insn)((&h_i_d[INSN_UID (insn)])->exact_tick) < clock_var + n_cycles)
2870 {
2871 must_backtrack = true;
2872 if (sched_verbose >= 2)
2873 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2874 }
2875 }
2876}
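
NEXT_Q_AFTER is circular-buffer indexing: the queue has a power-of-two
length, so max_insn_queue_index is an all-ones bitmask and the AND is a
cheap modulo. A standalone illustration with made-up sizes:

#include <cassert>
#include <cstdio>

int main ()
{
  const int max_index = 7;   /* 8 slots; the mask is 0b111 */
  int q_ptr = 6;             /* slot of the current cycle */

  for (int n_cycles = 0; n_cycles <= max_index; ++n_cycles)
    {
      int slot = (q_ptr + n_cycles) & max_index;  /* NEXT_Q_AFTER */
      assert (slot == (q_ptr + n_cycles) % (max_index + 1));
      printf ("due in %d cycles -> slot %d\n", n_cycles, slot);
    }
  /* due in 0 -> slot 6, in 1 -> slot 7, in 2 -> slot 0 (wraps), ...  */
}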
2877
2878/* Remove INSN from queue. */
2879static void
2880queue_remove (rtx_insn *insn)
2881{
2882 gcc_assert (QUEUE_INDEX (insn) >= 0)((void)(!(((&h_i_d[INSN_UID (insn)])->queue_index) >=
0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2882, __FUNCTION__), 0 : 0))
;
2883 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)((&h_i_d[INSN_UID (insn)])->queue_index)]);
2884 q_size--;
2885 QUEUE_INDEX (insn)((&h_i_d[INSN_UID (insn)])->queue_index) = QUEUE_NOWHERE(-2);
2886}
2887
2888/* Return a pointer to the bottom of the ready list, i.e. the insn
2889 with the lowest priority. */
2890
2891rtx_insn **
2892ready_lastpos (struct ready_list *ready)
2893{
2894 gcc_assert (ready->n_ready >= 1)((void)(!(ready->n_ready >= 1) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2894, __FUNCTION__), 0 : 0))
;
2895 return ready->vec + ready->first - ready->n_ready + 1;
2896}
2897
2898/* Add an element INSN to the ready list so that it ends up with the
2899 lowest/highest priority depending on FIRST_P. */
2900
2901HAIFA_INLINE__inline static void
2902ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2903{
2904 if (!first_p)
2905 {
2906 if (ready->first == ready->n_ready)
2907 {
2908 memmove (ready->vec + ready->veclen - ready->n_ready,
2909 ready_lastpos (ready),
2910 ready->n_ready * sizeof (rtx));
2911 ready->first = ready->veclen - 1;
2912 }
2913 ready->vec[ready->first - ready->n_ready] = insn;
2914 }
2915 else
2916 {
2917 if (ready->first == ready->veclen - 1)
2918 {
2919 if (ready->n_ready)
2920 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2921 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2922 ready_lastpos (ready),
2923 ready->n_ready * sizeof (rtx));
2924 ready->first = ready->veclen - 2;
2925 }
2926 ready->vec[++(ready->first)] = insn;
2927 }
2928
2929 ready->n_ready++;
2930 if (DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN))
2931 ready->n_debug++;
2932
2933 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY)((void)(!(((&h_i_d[INSN_UID (insn)])->queue_index) != (
-1)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2933, __FUNCTION__), 0 : 0))
;
2934 QUEUE_INDEX (insn)((&h_i_d[INSN_UID (insn)])->queue_index) = QUEUE_READY(-1);
2935
2936 if (INSN_EXACT_TICK (insn)((&h_i_d[INSN_UID (insn)])->exact_tick) != INVALID_TICK(-(max_insn_queue_index + 1))
2937 && INSN_EXACT_TICK (insn)((&h_i_d[INSN_UID (insn)])->exact_tick) < clock_var)
2938 {
2939 must_backtrack = true;
2940 }
2941}
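
The ready list keeps its elements at the top of VEC, growing downward: the
highest-priority insn sits at vec[first], and ready_element (i) reads
vec[first - i]. A small stand-in model of that layout (illustrative types
and sizes, not GCC's):

#include <cstdio>

struct ready_list { int vec[8]; int first; int n_ready; };

int main ()
{
  ready_list r = {{0}, 7, 0};   /* empty: first == veclen - 1 */

  for (int insn = 10; insn <= 12; ++insn)
    {
      r.vec[r.first - r.n_ready] = insn;  /* ready_add, first_p == false */
      r.n_ready++;
    }

  /* ready_element (i): index 0 is the highest-priority insn.  */
  for (int i = 0; i < r.n_ready; ++i)
    printf ("ready_element (%d) = %d\n", i, r.vec[r.first - i]); /* 10 11 12 */

  /* ready_lastpos: the lowest-priority slot, at the low end.  */
  printf ("lastpos -> %d\n", r.vec[r.first - r.n_ready + 1]);    /* 12 */
}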
2942
2943/* Remove the element with the highest priority from the ready list and
2944 return it. */
2945
2946HAIFA_INLINE__inline static rtx_insn *
2947ready_remove_first (struct ready_list *ready)
2948{
2949 rtx_insn *t;
2950
2951 gcc_assert (ready->n_ready)((void)(!(ready->n_ready) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2951, __FUNCTION__), 0 : 0))
;
2952 t = ready->vec[ready->first--];
2953 ready->n_ready--;
2954 if (DEBUG_INSN_P (t)(((enum rtx_code) (t)->code) == DEBUG_INSN))
2955 ready->n_debug--;
2956 /* If the ready list becomes empty, reset it. */
2957 if (ready->n_ready == 0)
2958 ready->first = ready->veclen - 1;
2959
2960 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY)((void)(!(((&h_i_d[INSN_UID (t)])->queue_index) == (-1
)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2960, __FUNCTION__), 0 : 0))
;
2961 QUEUE_INDEX (t)((&h_i_d[INSN_UID (t)])->queue_index) = QUEUE_NOWHERE(-2);
2962
2963 return t;
2964}
2965
2966/* The following code implements multi-pass scheduling for the first
2967 cycle. In other words, we will try to choose the ready insn that
2968 permits starting the maximum number of insns on the same cycle. */
2969
2970/* Return a pointer to the element at position INDEX in the ready list.
2971 The insn with the highest priority has INDEX 0, and the lowest-priority
2972 insn has INDEX N_READY - 1. */
2973
2974rtx_insn *
2975ready_element (struct ready_list *ready, int index)
2976{
2977 gcc_assert (ready->n_ready && index < ready->n_ready)((void)(!(ready->n_ready && index < ready->n_ready
) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2977, __FUNCTION__), 0 : 0))
;
2978
2979 return ready->vec[ready->first - index];
2980}
2981
2982/* Remove the element at position INDEX from the ready list and return
2983 it. The insn with the highest priority has INDEX 0, and the
2984 lowest-priority insn has INDEX N_READY - 1. */
2985
2986HAIFA_INLINE__inline static rtx_insn *
2987ready_remove (struct ready_list *ready, int index)
2988{
2989 rtx_insn *t;
2990 int i;
2991
2992 if (index == 0)
2993 return ready_remove_first (ready);
2994 gcc_assert (ready->n_ready && index < ready->n_ready)((void)(!(ready->n_ready && index < ready->n_ready
) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 2994, __FUNCTION__), 0 : 0))
;
2995 t = ready->vec[ready->first - index];
2996 ready->n_ready--;
2997 if (DEBUG_INSN_P (t)(((enum rtx_code) (t)->code) == DEBUG_INSN))
2998 ready->n_debug--;
2999 for (i = index; i < ready->n_ready; i++)
3000 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
3001 QUEUE_INDEX (t)((&h_i_d[INSN_UID (t)])->queue_index) = QUEUE_NOWHERE(-2);
3002 return t;
3003}
3004
3005/* Remove INSN from the ready list. */
3006static void
3007ready_remove_insn (rtx_insn *insn)
3008{
3009 int i;
3010
3011 for (i = 0; i < readyp->n_ready; i++)
3012 if (ready_element (readyp, i) == insn)
3013 {
3014 ready_remove (readyp, i);
3015 return;
3016 }
3017 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 3017, __FUNCTION__))
;
3018}
3019
3020/* Calculate the difference of the two statistics sets WAS and NOW.
3021 The result is returned in WAS. */
3022static void
3023rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
3024 const rank_for_schedule_stats_t *now)
3025{
3026 for (int i = 0; i < RFS_N; ++i)
3027 was->stats[i] = now->stats[i] - was->stats[i];
3028}
3029
3030/* Print rank_for_schedule statistics. */
3031static void
3032print_rank_for_schedule_stats (const char *prefix,
3033 const rank_for_schedule_stats_t *stats,
3034 struct ready_list *ready)
3035{
3036 for (int i = 0; i < RFS_N; ++i)
3037 if (stats->stats[i])
3038 {
3039 fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);
3040
3041 if (ready != NULLnullptr)
3042 /* Print out insns that won due to RFS_<I>. */
3043 {
3044 rtx_insn **p = ready_lastpos (ready);
3045
3046 fprintf (sched_dump, ":");
3047 /* Start with 1 since least-priority insn didn't have any wins. */
3048 for (int j = 1; j < ready->n_ready; ++j)
3049 if (INSN_LAST_RFS_WIN (p[j])((&h_i_d[INSN_UID (p[j])])->last_rfs_win) == i)
3050 fprintf (sched_dump, " %s",
3051 (*current_sched_info->print_insn) (p[j], 0));
3052 }
3053 fprintf (sched_dump, "\n");
3054 }
3055}
3056
3057/* Separate DEBUG_INSNs from normal insns. DEBUG_INSNs go to the end
3058 of the array. */
3059static void
3060ready_sort_debug (struct ready_list *ready)
3061{
3062 int i;
3063 rtx_insn **first = ready_lastpos (ready);
3064
3065 for (i = 0; i < ready->n_ready; ++i)
3066 if (!DEBUG_INSN_P (first[i])(((enum rtx_code) (first[i])->code) == DEBUG_INSN))
3067 INSN_RFS_DEBUG_ORIG_ORDER (first[i])((&h_i_d[INSN_UID (first[i])])->rfs_debug_orig_order) = i;
3068
3069 qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug)gcc_qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug
)
;
3070}
3071
3072/* Sort non-debug insns in the ready list READY by ascending priority.
3073 Assumes that all debug insns are separated from the real insns. */
3074static void
3075ready_sort_real (struct ready_list *ready)
3076{
3077 int i;
3078 rtx_insn **first = ready_lastpos (ready);
3079 int n_ready_real = ready->n_ready - ready->n_debug;
3080
3081 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3082 for (i = 0; i < n_ready_real; ++i)
3083 setup_insn_reg_pressure_info (first[i]);
3084 else if (sched_pressure == SCHED_PRESSURE_MODEL
3085 && model_curr_point < model_num_insns)
3086 model_set_excess_costs (first, n_ready_real);
3087
3088 rank_for_schedule_stats_t stats1;
3089 if (sched_verbose >= 4)
3090 stats1 = rank_for_schedule_stats;
3091
3092 if (n_ready_real == 2)
3093 swap_sort (first, n_ready_real);
3094 else if (n_ready_real > 2)
3095 qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule)gcc_qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule
)
;
3096
3097 if (sched_verbose >= 4)
3098 {
3099 rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
3100 print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
3101 }
3102}
3103
3104/* Sort the ready list READY by ascending priority. */
3105static void
3106ready_sort (struct ready_list *ready)
3107{
3108 if (ready->n_debug > 0)
3109 ready_sort_debug (ready);
3110 else
3111 ready_sort_real (ready);
3112}
3113
3114/* PREV is an insn that is ready to execute. Adjust its priority if that
3115 will help shorten or lengthen register lifetimes as appropriate. Also
3116 provide a hook for the target to tweak itself. */
3117
3118HAIFA_INLINE__inline static void
3119adjust_priority (rtx_insn *prev)
3120{
3121 /* ??? There used to be code here to try and estimate how an insn
3122 affected register lifetimes, but it did it by looking at REG_DEAD
3123 notes, which we removed in schedule_region. Nor did it try to
3124 take into account register pressure or anything useful like that.
3125
3126 Revisit when we have a machine model to work with and not before. */
3127
3128 if (targetm.sched.adjust_priority)
3129 INSN_PRIORITY (prev)((&h_i_d[INSN_UID (prev)])->priority) =
3130 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev)((&h_i_d[INSN_UID (prev)])->priority));
3131}
3132
3133/* Advance the DFA state STATE by one cycle. */
3134void
3135advance_state (state_t state)
3136{
3137 if (targetm.sched.dfa_pre_advance_cycle)
3138 targetm.sched.dfa_pre_advance_cycle ();
3139
3140 if (targetm.sched.dfa_pre_cycle_insn)
3141 state_transition (state,
3142 targetm.sched.dfa_pre_cycle_insn ());
3143
3144 state_transition (state, NULLnullptr);
3145
3146 if (targetm.sched.dfa_post_cycle_insn)
3147 state_transition (state,
3148 targetm.sched.dfa_post_cycle_insn ());
3149
3150 if (targetm.sched.dfa_post_advance_cycle)
3151 targetm.sched.dfa_post_advance_cycle ();
3152}
3153
3154/* Advance time by one cycle. */
3155HAIFA_INLINE__inline static void
3156advance_one_cycle (void)
3157{
3158 int i;
3159
3160 advance_state (curr_state);
3161 for (i = 4; i <= sched_verbose; ++i)
3162 fprintf (sched_dump, ";;\tAdvance the current state: %d.\n", clock_var);
3163}
3164
3165/* Update register pressure after scheduling INSN. */
3166static void
3167update_register_pressure (rtx_insn *insn)
3168{
3169 struct reg_use_data *use;
3170 struct reg_set_data *set;
3171
3172 gcc_checking_assert (!DEBUG_INSN_P (insn))((void)(!(!(((enum rtx_code) (insn)->code) == DEBUG_INSN))
? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 3172, __FUNCTION__), 0 : 0))
;
3173
3174 for (use = INSN_REG_USE_LIST (insn)((&h_i_d[INSN_UID (insn)])->reg_use_list); use != NULLnullptr; use = use->next_insn_use)
3175 if (dying_use_p (use))
3176 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3177 use->regno, false);
3178 for (set = INSN_REG_SET_LIST (insn)((&h_i_d[INSN_UID (insn)])->reg_set_list); set != NULLnullptr; set = set->next_insn_set)
3179 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3180 set->regno, true);
3181}
3182
3183/* Set up or update (if UPDATE_P) max register pressure (see its
3184 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3185 after insn AFTER. */
3186static void
3187setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3188{
3189 int i, p;
3190 bool eq_p;
3191 rtx_insn *insn;
3192 static int max_reg_pressure[N_REG_CLASSES((int) LIM_REG_CLASSES)];
3193
3194 save_reg_pressure ();
3195 for (i = 0; i < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); i++)
3196 max_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]]
3197 = curr_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]];
3198 for (insn = NEXT_INSN (after);
3199 insn != NULL_RTX(rtx) 0 && ! BARRIER_P (insn)(((enum rtx_code) (insn)->code) == BARRIER)
3200 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3201 insn = NEXT_INSN (insn))
3202 if (NONDEBUG_INSN_P (insn)((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code
) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->
code) == CALL_INSN))
)
3203 {
3204 eq_p = true;
3205 for (i = 0; i < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); i++)
3206 {
3207 p = max_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]];
3208 if (INSN_MAX_REG_PRESSURE (insn)((&h_i_d[INSN_UID (insn)])->max_reg_pressure)[i] != p)
3209 {
3210 eq_p = false;
3211 INSN_MAX_REG_PRESSURE (insn)((&h_i_d[INSN_UID (insn)])->max_reg_pressure)[i]
3212 = max_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]];
3213 }
3214 }
3215 if (update_p && eq_p)
3216 break;
3217 update_register_pressure (insn);
3218 for (i = 0; i < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); i++)
3219 if (max_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]]
3220 < curr_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]])
3221 max_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]]
3222 = curr_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]];
3223 }
3224 restore_reg_pressure ();
3225}
3226
3227/* Update the current register pressure after scheduling INSN. Also
3228 update the max register pressure for the unscheduled insns of the
3229 current BB. */
3230static void
3231update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3232{
3233 int i;
3234 int before[N_REG_CLASSES((int) LIM_REG_CLASSES)];
3235
3236 for (i = 0; i < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); i++)
6
Assuming 'i' is >= field 'x_ira_pressure_classes_num'
7
Loop condition is false. Execution continues on line 3238
3237 before[i] = curr_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]];
3238 update_register_pressure (insn);
3239 for (i = 0; i < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); i++)
8
The value 0 is assigned to 'i'
9
Assuming 'i' is < field 'x_ira_pressure_classes_num'
10
Loop condition is true. Entering loop body
3240 if (curr_reg_pressure[ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i]] != before[i])
11
The right operand of '!=' is a garbage value
3241 break;
3242 if (i < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num))
3243 setup_insn_max_reg_pressure (insn, true);
3244}
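
This function is where the report's warning fires. The two loops re-read
ira_pressure_classes_num, and the analyzer lets the intervening call to
update_register_pressure mutate that global target state: on the reported
path it assumes the count is zero for the first loop (step 6, so before[]
is never initialized) but positive for the second loop (step 9), making
the read of before[i] at line 3240 look like a garbage value. Since the
pressure-class count is fixed per-target state, this is a false positive.
One way to make the shared bound explicit to the analyzer would be to read
the count once into a local, as in this sketch (an illustration, not a
committed GCC change):

static void
update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
{
  int i;
  int before[N_REG_CLASSES];
  const int nclasses = ira_pressure_classes_num;  /* read the bound once */

  for (i = 0; i < nclasses; i++)
    before[i] = curr_reg_pressure[ira_pressure_classes[i]];
  update_register_pressure (insn);
  for (i = 0; i < nclasses; i++)
    if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
      break;
  if (i < nclasses)
    setup_insn_max_reg_pressure (insn, true);
}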
3245
3246/* Set up register pressure at the beginning of basic block BB, whose
3247 insns start after insn AFTER. Also set up the max register pressure
3248 for all insns of the basic block. */
3249void
3250sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3251{
3252 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED)((void)(!(sched_pressure == SCHED_PRESSURE_WEIGHTED) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 3252, __FUNCTION__), 0 : 0))
;
3253 initiate_bb_reg_pressure_info (bb);
3254 setup_insn_max_reg_pressure (after, false);
3255}
3256
3257/* If doing predication while scheduling, verify whether INSN, which
3258 has just been scheduled, clobbers the conditions of any
3259 instructions that must be predicated in order to break their
3260 dependencies. If so, remove them from the queues so that they will
3261 only be scheduled once their control dependency is resolved. */
3262
3263static void
3264check_clobbered_conditions (rtx_insn *insn)
3265{
3266 HARD_REG_SET t;
3267 int i;
3268
3269 if ((current_sched_info->flags & DO_PREDICATION) == 0)
3270 return;
3271
3272 find_all_hard_reg_sets (insn, &t, true);
3273
3274 restart:
3275 for (i = 0; i < ready.n_ready; i++)
3276 {
3277 rtx_insn *x = ready_element (&ready, i);
3278 if (TODO_SPEC (x)((&h_i_d[INSN_UID (x)])->todo_spec) == DEP_CONTROL((((((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4
) - 8) / 4))) << 1) << 1) << 1)
&& cond_clobbered_p (x, t))
3279 {
3280 ready_remove_insn (x);
3281 goto restart;
3282 }
3283 }
3284 for (i = 0; i <= max_insn_queue_index; i++)
3285 {
3286 rtx_insn_list *link;
3287 int q = NEXT_Q_AFTER (q_ptr, i)(((q_ptr)+i) & max_insn_queue_index);
3288
3289 restart_queue:
3290 for (link = insn_queue[q]; link; link = link->next ())
3291 {
3292 rtx_insn *x = link->insn ();
3293 if (TODO_SPEC (x)((&h_i_d[INSN_UID (x)])->todo_spec) == DEP_CONTROL((((((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4
) - 8) / 4))) << 1) << 1) << 1)
&& cond_clobbered_p (x, t))
3294 {
3295 queue_remove (x);
3296 goto restart_queue;
3297 }
3298 }
3299 }
3300}
3301
3302/* Return (in order):
3303
3304 - positive if INSN adversely affects the pressure on one
3305 register class
3306
3307 - negative if INSN reduces the pressure on one register class
3308
3309 - 0 if INSN doesn't affect the pressure on any register class. */
3310
3311static int
3312model_classify_pressure (struct model_insn_info *insn)
3313{
3314 struct reg_pressure_data *reg_pressure;
3315 int death[N_REG_CLASSES((int) LIM_REG_CLASSES)];
3316 int pci, cl, sum;
3317
3318 calculate_reg_deaths (insn->insn, death);
3319 reg_pressure = INSN_REG_PRESSURE (insn->insn)((&h_i_d[INSN_UID (insn->insn)])->reg_pressure);
3320 sum = 0;
3321 for (pci = 0; pci < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); pci++)
3322 {
3323 cl = ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[pci];
3324 if (death[cl] < reg_pressure[pci].set_increase)
3325 return 1;
3326 sum += reg_pressure[pci].set_increase - death[cl];
3327 }
3328 return sum;
3329}
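
A worked example of the classification rule, with made-up numbers: any
class where the insn sets more registers than die at it makes the result
positive immediately; otherwise the result is the (non-positive) sum of
the per-class net changes:

#include <cstdio>

int main ()
{
  /* Two pressure classes with illustrative values.  */
  int set_increase[2] = {1, 0};  /* new live ranges created per class */
  int death[2]        = {2, 0};  /* registers dying at the insn */

  int sum = 0;
  for (int pci = 0; pci < 2; pci++)
    {
      if (death[pci] < set_increase[pci])
        {
          printf ("1 (insn worsens pressure in one class)\n");
          return 0;
        }
      sum += set_increase[pci] - death[pci];
    }
  printf ("%d (<= 0: neutral or reduces pressure)\n", sum);  /* prints -1 */
}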
3330
3331/* Return true if INSN1 should come before INSN2 in the model schedule. */
3332
3333static int
3334model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3335{
3336 unsigned int height1, height2;
3337 unsigned int priority1, priority2;
3338
3339 /* Prefer instructions with a higher model priority. */
3340 if (insn1->model_priority != insn2->model_priority)
3341 return insn1->model_priority > insn2->model_priority;
3342
3343 /* Combine the length of the longest path of satisfied true dependencies
3344 that leads to each instruction (depth) with the length of the longest
3345 path of any dependencies that leads from the instruction (alap).
3346 Prefer instructions with the greatest combined length. If the combined
3347 lengths are equal, prefer instructions with the greatest depth.
3348
3349 The idea is that, if we have a set S of "equal" instructions that each
3350 have ALAP value X, and we pick one such instruction I, any true-dependent
3351 successors of I that have ALAP value X - 1 should be preferred over S.
3352 This encourages the schedule to be "narrow" rather than "wide".
3353 However, if I is a low-priority instruction that we decided to
3354 schedule because of its model_classify_pressure, and if there
3355 is a set of higher-priority instructions T, the aforementioned
3356 successors of I should not have the edge over T. */
3357 height1 = insn1->depth + insn1->alap;
3358 height2 = insn2->depth + insn2->alap;
3359 if (height1 != height2)
3360 return height1 > height2;
3361 if (insn1->depth != insn2->depth)
3362 return insn1->depth > insn2->depth;
3363
3364 /* We have no real preference between INSN1 and INSN2 as far as attempts
3365 to reduce pressure go. Prefer instructions with higher priorities. */
3366 priority1 = INSN_PRIORITY (insn1->insn)((&h_i_d[INSN_UID (insn1->insn)])->priority);
3367 priority2 = INSN_PRIORITY (insn2->insn)((&h_i_d[INSN_UID (insn2->insn)])->priority);
3368 if (priority1 != priority2)
3369 return priority1 > priority2;
3370
3371 /* Use the original rtl sequence as a tie-breaker. */
3372 return insn1 < insn2;
3373}
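
The height test can override plain priority: an instruction on a longer
dependence path wins even against a higher INSN_PRIORITY. A worked example
mirroring the rules above (illustrative numbers; the final address
tie-break is omitted):

#include <cstdio>

struct model_insn { int model_priority, depth, alap, priority; };

static bool order_p (const model_insn &a, const model_insn &b)
{
  if (a.model_priority != b.model_priority)
    return a.model_priority > b.model_priority;
  unsigned ha = a.depth + a.alap, hb = b.depth + b.alap;
  if (ha != hb)
    return ha > hb;                 /* longer combined path first */
  if (a.depth != b.depth)
    return a.depth > b.depth;       /* then the deeper insn */
  return a.priority > b.priority;   /* then plain priority */
}

int main ()
{
  model_insn i1 = {1, 3, 2, 7};     /* height 3 + 2 = 5 */
  model_insn i2 = {1, 2, 2, 9};     /* height 4: loses despite priority 9 */
  printf ("i1 before i2: %d\n", order_p (i1, i2));   /* prints 1 */
}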
3374
3375/* Add INSN to the model worklist immediately after PREV. Add it to the
3376 beginning of the list if PREV is null. */
3377
3378static void
3379model_add_to_worklist_at (struct model_insn_info *insn,
3380 struct model_insn_info *prev)
3381{
3382 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE)((void)(!(((&h_i_d[INSN_UID (insn->insn)])->queue_index
) == (-2)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 3382, __FUNCTION__), 0 : 0))
;
3383 QUEUE_INDEX (insn->insn)((&h_i_d[INSN_UID (insn->insn)])->queue_index) = QUEUE_READY(-1);
3384
3385 insn->prev = prev;
3386 if (prev)
3387 {
3388 insn->next = prev->next;
3389 prev->next = insn;
3390 }
3391 else
3392 {
3393 insn->next = model_worklist;
3394 model_worklist = insn;
3395 }
3396 if (insn->next)
3397 insn->next->prev = insn;
3398}
3399
3400/* Remove INSN from the model worklist. */
3401
3402static void
3403model_remove_from_worklist (struct model_insn_info *insn)
3404{
3405 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY)((void)(!(((&h_i_d[INSN_UID (insn->insn)])->queue_index
) == (-1)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 3405, __FUNCTION__), 0 : 0))
;
3406 QUEUE_INDEX (insn->insn)((&h_i_d[INSN_UID (insn->insn)])->queue_index) = QUEUE_NOWHERE(-2);
3407
3408 if (insn->prev)
3409 insn->prev->next = insn->next;
3410 else
3411 model_worklist = insn->next;
3412 if (insn->next)
3413 insn->next->prev = insn->prev;
3414}
3415
3416/* Add INSN to the model worklist. Start looking for a suitable position
3417 between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
3418 insns on either side. A null PREV indicates the beginning of the list and
3419 a null NEXT indicates the end. */
3420
3421static void
3422model_add_to_worklist (struct model_insn_info *insn,
3423 struct model_insn_info *prev,
3424 struct model_insn_info *next)
3425{
3426 int count;
3427
3428 count = param_max_sched_ready_insnsglobal_options.x_param_max_sched_ready_insns;
3429 if (count > 0 && prev && model_order_p (insn, prev))
3430 do
3431 {
3432 count--;
3433 prev = prev->prev;
3434 }
3435 while (count > 0 && prev && model_order_p (insn, prev));
3436 else
3437 while (count > 0 && next && model_order_p (next, insn))
3438 {
3439 count--;
3440 prev = next;
3441 next = next->next;
3442 }
3443 model_add_to_worklist_at (insn, prev);
3444}
3445
3446/* INSN may now have a higher priority (in the model_order_p sense)
3447 than before. Move it up the worklist if necessary. */
3448
3449static void
3450model_promote_insn (struct model_insn_info *insn)
3451{
3452 struct model_insn_info *prev;
3453 int count;
3454
3455 prev = insn->prev;
3456 count = param_max_sched_ready_insnsglobal_options.x_param_max_sched_ready_insns;
3457 while (count > 0 && prev && model_order_p (insn, prev))
3458 {
3459 count--;
3460 prev = prev->prev;
3461 }
3462 if (prev != insn->prev)
3463 {
3464 model_remove_from_worklist (insn);
3465 model_add_to_worklist_at (insn, prev);
3466 }
3467}
3468
3469/* Add INSN to the end of the model schedule. */
3470
3471static void
3472model_add_to_schedule (rtx_insn *insn)
3473{
3474 unsigned int point;
3475
3476 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE)((void)(!(((&h_i_d[INSN_UID (insn)])->queue_index) == (
-2)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 3476, __FUNCTION__), 0 : 0))
;
3477 QUEUE_INDEX (insn)((&h_i_d[INSN_UID (insn)])->queue_index) = QUEUE_SCHEDULED(-3);
3478
3479 point = model_schedule.length ();
3480 model_schedule.quick_push (insn);
3481 INSN_MODEL_INDEX (insn)((&h_i_d[INSN_UID (insn)])->model_index) = point + 1;
3482}
3483
3484/* Analyze the instructions that are to be scheduled, setting up
3485 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3486 instructions to model_worklist. */
3487
3488static void
3489model_analyze_insns (void)
3490{
3491 rtx_insn *start, *end, *iter;
3492 sd_iterator_def sd_it;
3493 dep_t dep;
3494 struct model_insn_info *insn, *con;
3495
3496 model_num_insns = 0;
3497 start = PREV_INSN (current_sched_info->next_tail);
3498 end = current_sched_info->prev_head;
3499 for (iter = start; iter != end; iter = PREV_INSN (iter))
3500 if (NONDEBUG_INSN_P (iter)((((enum rtx_code) (iter)->code) == INSN) || (((enum rtx_code
) (iter)->code) == JUMP_INSN) || (((enum rtx_code) (iter)->
code) == CALL_INSN))
)
3501 {
3502 insn = MODEL_INSN_INFO (iter)(&model_insns[(sched_luids[INSN_UID (iter)])]);
3503 insn->insn = iter;
3504 FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)for ((sd_it) = sd_iterator_start ((iter), ((4))); sd_iterator_cond
(&(sd_it), &(dep)); sd_iterator_next (&(sd_it)))
3505 {
3506 con = MODEL_INSN_INFO (DEP_CON (dep))(&model_insns[(sched_luids[INSN_UID (((dep)->con))])]);
3507 if (con->insn && insn->alap < con->alap + 1)
3508 insn->alap = con->alap + 1;
3509 }
3510
3511 insn->old_queue = QUEUE_INDEX (iter)((&h_i_d[INSN_UID (iter)])->queue_index);
3512 QUEUE_INDEX (iter)((&h_i_d[INSN_UID (iter)])->queue_index) = QUEUE_NOWHERE(-2);
3513
3514 insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK(1));
3515 if (insn->unscheduled_preds == 0)
3516 model_add_to_worklist (insn, NULLnullptr, model_worklist);
3517
3518 model_num_insns++;
3519 }
3520}
3521
3522/* The global state describes the register pressure at the start of the
3523 model schedule. Initialize GROUP accordingly. */
3524
3525static void
3526model_init_pressure_group (struct model_pressure_group *group)
3527{
3528 int pci, cl;
3529
3530 for (pci = 0; pci < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); pci++)
3531 {
3532 cl = ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[pci];
3533 group->limits[pci].pressure = curr_reg_pressure[cl];
3534 group->limits[pci].point = 0;
3535 }
3536 /* Use index model_num_insns to record the state after the last
3537 instruction in the model schedule. */
3538 group->model = XNEWVEC (struct model_pressure_data,((struct model_pressure_data *) xmalloc (sizeof (struct model_pressure_data
) * ((model_num_insns + 1) * (this_target_ira->x_ira_pressure_classes_num
))))
3539 (model_num_insns + 1) * ira_pressure_classes_num)((struct model_pressure_data *) xmalloc (sizeof (struct model_pressure_data
) * ((model_num_insns + 1) * (this_target_ira->x_ira_pressure_classes_num
))))
;
3540}
3541
3542/* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3543 Update the maximum pressure for the whole schedule. */
3544
3545static void
3546model_record_pressure (struct model_pressure_group *group,
3547 int point, int pci, int pressure)
3548{
3549 MODEL_REF_PRESSURE (group, point, pci)((&(group)->model[(point) * (this_target_ira->x_ira_pressure_classes_num
) + (pci)])->ref_pressure)
= pressure;
3550 if (group->limits[pci].pressure < pressure)
3551 {
3552 group->limits[pci].pressure = pressure;
3553 group->limits[pci].point = point;
3554 }
3555}
3556
3557/* INSN has just been added to the end of the model schedule. Record its
3558 register-pressure information. */
3559
3560static void
3561model_record_pressures (struct model_insn_info *insn)
3562{
3563 struct reg_pressure_data *reg_pressure;
3564 int point, pci, cl, delta;
3565 int death[N_REG_CLASSES((int) LIM_REG_CLASSES)];
3566
3567 point = model_index (insn->insn);
3568 if (sched_verbose >= 2)
3569 {
3570 if (point == 0)
3571 {
3572 fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3573 fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3574 }
3575 fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3576 point, INSN_UID (insn->insn), insn->model_priority,
3577 insn->depth + insn->alap, insn->depth,
3578 INSN_PRIORITY (insn->insn)((&h_i_d[INSN_UID (insn->insn)])->priority),
3579 str_pattern_slim (PATTERN (insn->insn)));
3580 }
3581 calculate_reg_deaths (insn->insn, death);
3582 reg_pressure = INSN_REG_PRESSURE (insn->insn)((&h_i_d[INSN_UID (insn->insn)])->reg_pressure);
3583 for (pci = 0; pci < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); pci++)
3584 {
3585 cl = ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[pci];
3586 delta = reg_pressure[pci].set_increase - death[cl];
3587 if (sched_verbose >= 2)
3588 fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3589 curr_reg_pressure[cl], delta);
3590 model_record_pressure (&model_before_pressure, point, pci,
3591 curr_reg_pressure[cl]);
3592 }
3593 if (sched_verbose >= 2)
3594 fprintf (sched_dump, "\n");
3595}
3596
3597/* All instructions have been added to the model schedule. Record the
3598 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3599
3600static void
3601model_record_final_pressures (struct model_pressure_group *group)
3602{
3603 int point, pci, max_pressure, ref_pressure, cl;
3604
3605 for (pci = 0; pci < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); pci++)
3606 {
3607 /* Record the final pressure for this class. */
3608 cl = ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[pci];
3609 point = model_num_insns;
3610 ref_pressure = curr_reg_pressure[cl];
3611 model_record_pressure (group, point, pci, ref_pressure);
3612
3613 /* Record the original maximum pressure. */
3614 group->limits[pci].orig_pressure = group->limits[pci].pressure;
3615
3616 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3617 max_pressure = ref_pressure;
3618 MODEL_MAX_PRESSURE (group, point, pci)((&(group)->model[(point) * (this_target_ira->x_ira_pressure_classes_num
) + (pci)])->max_pressure)
= max_pressure;
3619 while (point > 0)
3620 {
3621 point--;
3622 ref_pressure = MODEL_REF_PRESSURE (group, point, pci)((&(group)->model[(point) * (this_target_ira->x_ira_pressure_classes_num
) + (pci)])->ref_pressure)
;
3623 max_pressure = MAX (max_pressure, ref_pressure)((max_pressure) > (ref_pressure) ? (max_pressure) : (ref_pressure
))
;
3624 MODEL_MAX_PRESSURE (group, point, pci)((&(group)->model[(point) * (this_target_ira->x_ira_pressure_classes_num
) + (pci)])->max_pressure)
= max_pressure;
3625 }
3626 }
3627}
3628
3629/* Update all successors of INSN, given that INSN has just been scheduled. */
3630
3631static void
3632model_add_successors_to_worklist (struct model_insn_info *insn)
3633{
3634 sd_iterator_def sd_it;
3635 struct model_insn_info *con;
3636 dep_t dep;
3637
3638 FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)for ((sd_it) = sd_iterator_start ((insn->insn), ((4))); sd_iterator_cond
(&(sd_it), &(dep)); sd_iterator_next (&(sd_it)))
3639 {
3640 con = MODEL_INSN_INFO (DEP_CON (dep))(&model_insns[(sched_luids[INSN_UID (((dep)->con))])]);
3641 /* Ignore debug instructions, and instructions from other blocks. */
3642 if (con->insn)
3643 {
3644 con->unscheduled_preds--;
3645
3646 /* Update the depth field of each true-dependent successor.
3647 Increasing the depth gives them a higher priority than
3648 before. */
3649 if (DEP_TYPE (dep)((dep)->type) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3650 {
3651 con->depth = insn->depth + 1;
3652 if (QUEUE_INDEX (con->insn)((&h_i_d[INSN_UID (con->insn)])->queue_index) == QUEUE_READY(-1))
3653 model_promote_insn (con);
3654 }
3655
3656 /* If this is a true dependency, or if there are no remaining
3657 dependencies for CON (meaning that CON only had non-true
3658 dependencies), make sure that CON is on the worklist.
3659 We don't bother otherwise because it would tend to fill the
3660 worklist with a lot of low-priority instructions that are not
3661 yet ready to issue. */
3662 if ((con->depth > 0 || con->unscheduled_preds == 0)
3663 && QUEUE_INDEX (con->insn)((&h_i_d[INSN_UID (con->insn)])->queue_index) == QUEUE_NOWHERE(-2))
3664 model_add_to_worklist (con, insn, insn->next);
3665 }
3666 }
3667}
3668
3669/* Give INSN a higher priority than any current instruction, then give
3670 unscheduled predecessors of INSN a higher priority still. If any of
3671 those predecessors are not on the model worklist, do the same for their
3672 predecessors, and so on. */
3673
3674static void
3675model_promote_predecessors (struct model_insn_info *insn)
3676{
3677 struct model_insn_info *pro, *first;
3678 sd_iterator_def sd_it;
3679 dep_t dep;
3680
3681 if (sched_verbose >= 7)
3682 fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3683 INSN_UID (insn->insn), model_next_priority);
3684 insn->model_priority = model_next_priority++;
3685 model_remove_from_worklist (insn);
3686 model_add_to_worklist_at (insn, NULLnullptr);
3687
3688 first = NULLnullptr;
3689 for (;;)
3690 {
3691 FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)for ((sd_it) = sd_iterator_start ((insn->insn), ((1))); sd_iterator_cond
(&(sd_it), &(dep)); sd_iterator_next (&(sd_it)))
3692 {
3693 pro = MODEL_INSN_INFO (DEP_PRO (dep))(&model_insns[(sched_luids[INSN_UID (((dep)->pro))])]);
3694 /* The first test is to ignore debug instructions, and instructions
3695 from other blocks. */
3696 if (pro->insn
3697 && pro->model_priority != model_next_priority
3698 && QUEUE_INDEX (pro->insn)((&h_i_d[INSN_UID (pro->insn)])->queue_index) != QUEUE_SCHEDULED(-3))
3699 {
3700 pro->model_priority = model_next_priority;
3701 if (sched_verbose >= 7)
3702 fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3703 if (QUEUE_INDEX (pro->insn)((&h_i_d[INSN_UID (pro->insn)])->queue_index) == QUEUE_READY(-1))
3704 {
3705 /* PRO is already in the worklist, but it now has
3706 a higher priority than before. Move it to the
3707 appropriate place. */
3708 model_remove_from_worklist (pro);
3709 model_add_to_worklist (pro, NULLnullptr, model_worklist);
3710 }
3711 else
3712 {
3713 /* PRO isn't in the worklist. Recursively process
3714 its predecessors until we find one that is. */
3715 pro->next = first;
3716 first = pro;
3717 }
3718 }
3719 }
3720 if (!first)
3721 break;
3722 insn = first;
3723 first = insn->next;
3724 }
3725 if (sched_verbose >= 7)
3726 fprintf (sched_dump, " = %d\n", model_next_priority);
3727 model_next_priority++;
3728}
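
The loop above visits an arbitrarily deep predecessor graph without
recursion by threading a work stack through the nodes' own next pointers,
which is safe because a node taking that branch is not on the worklist and
so is not using next for anything else. The pattern in isolation (a generic
sketch with illustrative types):

#include <cstdio>
#include <vector>

struct node
{
  int id;
  std::vector<node *> preds;
  node *next = nullptr;   /* doubles as the intrusive stack link */
  bool marked = false;
};

static void mark_predecessors (node *n)
{
  node *first = nullptr;        /* head of the intrusive work stack */
  n->marked = true;
  for (;;)
    {
      for (node *p : n->preds)
        if (!p->marked)
          {
            p->marked = true;
            p->next = first;    /* push, with no extra allocation */
            first = p;
          }
      if (!first)
        break;
      n = first;                /* pop the next node to process */
      first = n->next;
    }
}

int main ()
{
  node a{1}, b{2}, c{3};
  c.preds = {&a, &b};
  b.preds = {&a};
  mark_predecessors (&c);
  printf ("%d %d %d\n", a.marked, b.marked, c.marked);   /* prints 1 1 1 */
}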
3729
3730/* Pick one instruction from model_worklist and process it. */
3731
3732static void
3733model_choose_insn (void)
3734{
3735 struct model_insn_info *insn, *fallback;
3736 int count;
3737
3738 if (sched_verbose >= 7)
3739 {
3740 fprintf (sched_dump, ";;\t+--- worklist:\n");
3741 insn = model_worklist;
3742 count = param_max_sched_ready_insnsglobal_options.x_param_max_sched_ready_insns;
3743 while (count > 0 && insn)
3744 {
3745 fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
3746 INSN_UID (insn->insn), insn->model_priority,
3747 insn->depth + insn->alap, insn->depth,
3748 INSN_PRIORITY (insn->insn)((&h_i_d[INSN_UID (insn->insn)])->priority));
3749 count--;
3750 insn = insn->next;
3751 }
3752 }
3753
3754 /* Look for a ready instruction whose model_classify_pressure is zero
3755 or negative, picking the highest-priority one. Adding such an
3756 instruction to the schedule now should do no harm, and may actually
3757 do some good.
3758
3759 Failing that, see whether there is an instruction with the highest
3760 extant model_priority that is not yet ready, but which would reduce
3761 pressure if it became ready. This is designed to catch cases like:
3762
3763 (set (mem (reg R1)) (reg R2))
3764
3765 where the instruction is the last remaining use of R1 and where the
3766 value of R2 is not yet available (or vice versa). The death of R1
3767 means that this instruction already reduces pressure. It is of
3768 course possible that the computation of R2 involves other registers
3769 that are hard to kill, but such cases are rare enough for this
3770 heuristic to be a win in general.
3771
3772 Failing that, just pick the highest-priority instruction in the
3773 worklist. */
3774 count = param_max_sched_ready_insnsglobal_options.x_param_max_sched_ready_insns;
3775 insn = model_worklist;
3776 fallback = 0;
3777 for (;;)
3778 {
3779 if (count == 0 || !insn)
3780 {
3781 insn = fallback ? fallback : model_worklist;
3782 break;
3783 }
3784 if (insn->unscheduled_preds)
3785 {
3786 if (model_worklist->model_priority == insn->model_priority
3787 && !fallback
3788 && model_classify_pressure (insn) < 0)
3789 fallback = insn;
3790 }
3791 else
3792 {
3793 if (model_classify_pressure (insn) <= 0)
3794 break;
3795 }
3796 count--;
3797 insn = insn->next;
3798 }
3799
3800 if (sched_verbose >= 7 && insn != model_worklist)
3801 {
3802 if (insn->unscheduled_preds)
3803 fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3804 INSN_UID (insn->insn));
3805 else
3806 fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3807 INSN_UID (insn->insn));
3808 }
3809 if (insn->unscheduled_preds)
3810 /* INSN isn't yet ready to issue. Give all its predecessors the
3811 highest priority. */
3812 model_promote_predecessors (insn);
3813 else
3814 {
3815 /* INSN is ready. Add it to the end of model_schedule and
3816 process its successors. */
3817 model_add_successors_to_worklist (insn);
3818 model_remove_from_worklist (insn);
3819 model_add_to_schedule (insn->insn);
3820 model_record_pressures (insn);
3821 update_register_pressure (insn->insn);
3822 }
3823}
3824
3825/* Restore all QUEUE_INDEXs to the values that they had before
3826 model_start_schedule was called. */
3827
3828static void
3829model_reset_queue_indices (void)
3830{
3831 unsigned int i;
3832 rtx_insn *insn;
3833
3834 FOR_EACH_VEC_ELT (model_schedule, i, insn)for (i = 0; (model_schedule).iterate ((i), &(insn)); ++(i
))
3835 QUEUE_INDEX (insn)((&h_i_d[INSN_UID (insn)])->queue_index) = MODEL_INSN_INFO (insn)(&model_insns[(sched_luids[INSN_UID (insn)])])->old_queue;
3836}
3837
3838/* We have calculated the model schedule and spill costs. Print a summary
3839 to sched_dump. */
3840
3841static void
3842model_dump_pressure_summary (void)
3843{
3844 int pci, cl;
3845
3846 fprintf (sched_dump, ";; Pressure summary:");
3847 for (pci = 0; pci < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); pci++)
3848 {
3849 cl = ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[pci];
3850 fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3851 model_before_pressure.limits[pci].pressure);
3852 }
3853 fprintf (sched_dump, "\n\n");
3854}
3855
3856/* Initialize the SCHED_PRESSURE_MODEL information for the current
3857 scheduling region. */
3858
3859static void
3860model_start_schedule (basic_block bb)
3861{
3862 model_next_priority = 1;
3863 model_schedule.create (sched_max_luid);
3864 model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid)((struct model_insn_info *) xcalloc ((sched_max_luid), sizeof
(struct model_insn_info)))
;
3865
3866 gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)))((void)(!(bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info
->prev_head))) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 3866, __FUNCTION__), 0 : 0))
;
3867 initiate_reg_pressure_info (df_get_live_in (bb));
3868
3869 model_analyze_insns ();
3870 model_init_pressure_group (&model_before_pressure);
3871 while (model_worklist)
3872 model_choose_insn ();
3873 gcc_assert (model_num_insns == (int) model_schedule.length ())((void)(!(model_num_insns == (int) model_schedule.length ()) ?
fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/haifa-sched.cc"
, 3873, __FUNCTION__), 0 : 0))
;
3874 if (sched_verbose >= 2)
3875 fprintf (sched_dump, "\n");
3876
3877 model_record_final_pressures (&model_before_pressure);
3878 model_reset_queue_indices ();
3879
3880 XDELETEVEC (model_insns)free ((void*) (model_insns));
3881
3882 model_curr_point = 0;
3883 initiate_reg_pressure_info (df_get_live_in (bb));
3884 if (sched_verbose >= 1)
3885 model_dump_pressure_summary ();
3886}
3887
3888/* Free the information associated with GROUP. */
3889
3890static void
3891model_finalize_pressure_group (struct model_pressure_group *group)
3892{
3893 XDELETEVEC (group->model)free ((void*) (group->model));
3894}
3895
3896/* Free the information created by model_start_schedule. */
3897
3898static void
3899model_end_schedule (void)
3900{
3901 model_finalize_pressure_group (&model_before_pressure);
3902 model_schedule.release ();
3903}
3904
3905/* Prepare reg pressure scheduling for basic block BB. */
3906static void
3907sched_pressure_start_bb (basic_block bb)
3908{
3909 /* Set the number of available registers for each class taking into account
3910 relative probability of current basic block versus function prologue and
3911 epilogue.
3912 * If the basic block executes much more often than the prologue/epilogue
3913 (e.g., inside a hot loop), then cost of spill in the prologue is close to
3914 nil, so the effective number of available registers is
3915 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl] - 0).
3916 * If the basic block executes as often as the prologue/epilogue,
3917 then spill in the block is as costly as in the prologue, so the effective
3918 number of available registers is
3919 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3920 - call_saved_regs_num[cl]).
3921 Note that all-else-equal, we prefer to spill in the prologue, since that
3922 allows "extra" registers for other basic blocks of the function.
3923 * If the basic block is on the cold path of the function and executes
3924 rarely, then we should always prefer to spill in the block, rather than
3925 in the prologue/epilogue. The effective number of available registers is
3926 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3927 - call_saved_regs_num[cl]). */
3928 {
3929 int i;
3930 int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr)->count.to_frequency (cfun(cfun + 0));
3931 int bb_freq = bb->count.to_frequency (cfun(cfun + 0));
3932
3933 if (bb_freq == 0)
3934 {
3935 if (entry_freq == 0)
3936 entry_freq = bb_freq = 1;
3937 }
3938 if (bb_freq < entry_freq)
3939 bb_freq = entry_freq;
3940
3941 for (i = 0; i < ira_pressure_classes_num(this_target_ira->x_ira_pressure_classes_num); ++i)
3942 {
3943 enum reg_class cl = ira_pressure_classes(this_target_ira->x_ira_pressure_classes)[i];
3944 sched_class_regs_num[cl] = ira_class_hard_regs_num(this_target_ira->x_ira_class_hard_regs_num)[cl]
3945 - fixed_regs_num[cl];
3946 sched_class_regs_num[cl]
3947 -= (call_saved_regs_num[cl] * entry_freq) / bb_freq;
3948 }
3949 }
3950
3951 if (sched_pressure == SCHED_PRESSURE_MODEL)
3952 model_start_schedule (bb);
3953}
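
The frequency scaling above is easiest to sanity-check with small numbers. Below is a minimal standalone sketch of the same arithmetic (plain ints and illustrative values only; the real code reads ira's per-class arrays):

    #include <stdio.h>

    /* Effective registers for one pressure class, mirroring the formula
       above: hard regs minus fixed regs, minus call-saved regs scaled by
       entry_freq / bb_freq.  */
    static int
    effective_regs (int hard_regs, int fixed_regs, int call_saved_regs,
                    int entry_freq, int bb_freq)
    {
      if (bb_freq == 0)
        {
          if (entry_freq == 0)
            entry_freq = bb_freq = 1;
        }
      if (bb_freq < entry_freq)
        bb_freq = entry_freq;
      return hard_regs - fixed_regs - (call_saved_regs * entry_freq) / bb_freq;
    }

    int
    main (void)
    {
      /* Hot loop: block runs 100x more often than the prologue, so almost
         all call-saved registers become available.  */
      printf ("%d\n", effective_regs (16, 2, 8, 1, 100));   /* 14 */
      /* Block as frequent as the prologue: every call-saved reg is "paid".  */
      printf ("%d\n", effective_regs (16, 2, 8, 100, 100)); /* 6 */
      return 0;
    }
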
3954
3955/* A structure that holds local state for the loop in schedule_block. */
3956struct sched_block_state
3957{
3958 /* True if no real insns have been scheduled in the current cycle. */
3959 bool first_cycle_insn_p;
3960 /* True if a shadow insn has been scheduled in the current cycle, which
3961 means that no more normal insns can be issued. */
3962 bool shadows_only_p;
3963 /* True if we're winding down a modulo schedule, which means that we only
3964 issue insns with INSN_EXACT_TICK set. */
3965 bool modulo_epilogue;
3966 /* Initialized with the machine's issue rate every cycle, and updated
3967 by calls to the variable_issue hook. */
3968 int can_issue_more;
3969};
3970
3971/* INSN is the "currently executing insn". Launch each insn which was
3972 waiting on INSN. READY is the ready list which contains the insns
3973 that are ready to fire. CLOCK is the current cycle. The function
3974 returns the necessary cycle advance after issuing the insn (it is not
3975 zero for insns in a schedule group). */
3976
3977static int
3978schedule_insn (rtx_insn *insn)
3979{
3980 sd_iterator_def sd_it;
3981 dep_t dep;
3982 int i;
3983 int advance = 0;
3984
3985 if (sched_verbose >= 1)
  [1] Assuming 'sched_verbose' is < 1
3986 {
3987 struct reg_pressure_data *pressure_info;
3988 fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3989 clock_var, (*current_sched_info->print_insn) (insn, 1),
3990 str_pattern_slim (PATTERN (insn)));
3991
3992 if (recog_memoized (insn) < 0)
3993 fprintf (sched_dump, "nothing");
3994 else
3995 print_reservation (sched_dump, insn);
3996 pressure_info = INSN_REG_PRESSURE (insn);
3997 if (pressure_info != NULL)
3998 {
3999 fputc (':', sched_dump);
4000 for (i = 0; i < ira_pressure_classes_num; i++)
4001 fprintf (sched_dump, "%s%s%+d(%d)",
4002 scheduled_insns.length () > 1
4003 && INSN_LUID (insn)
4004 < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
4005 reg_class_names[ira_pressure_classes[i]],
4006 pressure_info[i].set_increase, pressure_info[i].change);
4007 }
4008 if (sched_pressure == SCHED_PRESSURE_MODEL
4009 && model_curr_point < model_num_insns
4010 && model_index (insn) == model_curr_point)
4011 fprintf (sched_dump, ":model %d", model_curr_point);
4012 fputc ('\n', sched_dump);
4013 }
4014
4015 if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
  [2] Assuming 'sched_pressure' is equal to SCHED_PRESSURE_WEIGHTED
  [3] Assuming field 'code' is not equal to DEBUG_INSN
  [4] Taking true branch
4016 update_reg_and_insn_max_reg_pressure (insn);
  [5] Calling 'update_reg_and_insn_max_reg_pressure'
4017
4018 /* Scheduling instruction should have all its dependencies resolved and
4019 should have been removed from the ready list. */
4020 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
4021
4022 /* Reset debug insns invalidated by moving this insn. */
4023 if (MAY_HAVE_DEBUG_BIND_INSNS && !DEBUG_INSN_P (insn))
4024 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
4025 sd_iterator_cond (&sd_it, &dep);)
4026 {
4027 rtx_insn *dbg = DEP_PRO (dep);
4028 struct reg_use_data *use, *next;
4029
4030 if (DEP_STATUS (dep) & DEP_CANCELLED)
4031 {
4032 sd_iterator_next (&sd_it);
4033 continue;
4034 }
4035
4036 gcc_assert (DEBUG_BIND_INSN_P (dbg));
4037
4038 if (sched_verbose >= 6)
4039 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
4040 INSN_UID (dbg));
4041
4042 /* ??? Rather than resetting the debug insn, we might be able
4043 to emit a debug temp before the just-scheduled insn, but
4044 this would involve checking that the expression at the
4045 point of the debug insn is equivalent to the expression
4046 before the just-scheduled insn. They might not be: the
4047 expression in the debug insn may depend on other insns not
4048 yet scheduled that set MEMs, REGs or even other debug
4049 insns. It's not clear that attempting to preserve debug
4050 information in these cases is worth the effort, given how
4051 uncommon these resets are and the likelihood that the debug
4052 temps introduced won't survive the schedule change. */
4053 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
4054 df_insn_rescan (dbg);
4055
4056 /* Unknown location doesn't use any registers. */
4057 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
4058 {
4059 struct reg_use_data *prev = use;
4060
4061 /* Remove use from the cyclic next_regno_use chain first. */
4062 while (prev->next_regno_use != use)
4063 prev = prev->next_regno_use;
4064 prev->next_regno_use = use->next_regno_use;
4065 next = use->next_insn_use;
4066 free (use);
4067 }
4068 INSN_REG_USE_LIST (dbg) = NULL;
4069
4070 /* We delete rather than resolve these deps, otherwise we
4071 crash in sched_free_deps(), because forward deps are
4072 expected to be released before backward deps. */
4073 sd_delete_dep (sd_it);
4074 }
4075
4076 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
4077 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
4078
4079 if (sched_pressure == SCHED_PRESSURE_MODEL
4080 && model_curr_point < model_num_insns
4081 && NONDEBUG_INSN_P (insn))
4082 {
4083 if (model_index (insn) == model_curr_point)
4084 do
4085 model_curr_point++;
4086 while (model_curr_point < model_num_insns
4087 && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
4088 == QUEUE_SCHEDULED));
4089 else
4090 model_recompute (insn);
4091 model_update_limit_points ();
4092 update_register_pressure (insn);
4093 if (sched_verbose >= 2)
4094 print_curr_reg_pressure ();
4095 }
4096
4097 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
4098 if (INSN_TICK (insn) > clock_var)
4099 /* INSN has been prematurely moved from the queue to the ready list.
4100 This is possible only if following flags are set. */
4101 gcc_assert (flag_sched_stalled_insns || sched_fusion);
4102
4103 /* ??? Probably, if INSN is scheduled prematurely, we should leave
4104 INSN_TICK untouched. This is a machine-dependent issue, actually. */
4105 INSN_TICK (insn) = clock_var;
4106
4107 check_clobbered_conditions (insn);
4108
4109 /* Update dependent instructions. First, see if by scheduling this insn
4110 now we broke a dependence in a way that requires us to change another
4111 insn. */
4112 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4113 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4114 {
4115 struct dep_replacement *desc = DEP_REPLACE (dep);
4116 rtx_insn *pro = DEP_PRO (dep);
4117 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
4118 && desc != NULL && desc->insn == pro)
4119 apply_replacement (dep, false);
4120 }
4121
4122 /* Go through and resolve forward dependencies. */
4123 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4124 sd_iterator_cond (&sd_it, &dep);)
4125 {
4126 rtx_insn *next = DEP_CON (dep);
4127 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4128
4129 /* Resolve the dependence between INSN and NEXT.
4130 sd_resolve_dep () moves current dep to another list thus
4131 advancing the iterator. */
4132 sd_resolve_dep (sd_it);
4133
4134 if (cancelled)
4135 {
4136 if (must_restore_pattern_p (next, dep))
4137 restore_pattern (dep, false);
4138 continue;
4139 }
4140
4141 /* Don't bother trying to mark next as ready if insn is a debug
4142 insn. If insn is the last hard dependency, it will have
4143 already been discounted. */
4144 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4145 continue;
4146
4147 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4148 {
4149 int effective_cost;
4150
4151 effective_cost = try_ready (next);
4152
4153 if (effective_cost >= 0
4154 && SCHED_GROUP_P (next)
4155 && advance < effective_cost)
4156 advance = effective_cost;
4157 }
4158 else
4159 /* Check always has only one forward dependence (to the first insn in
4160 the recovery block), therefore, this will be executed only once. */
4161 {
4162 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4163 fix_recovery_deps (RECOVERY_BLOCK (insn));
4164 }
4165 }
4166
4167 /* Annotate the instruction with issue information -- TImode
4168 indicates that the instruction is expected not to be able
4169 to issue on the same cycle as the previous insn. A machine
4170 may use this information to decide how the instruction should
4171 be aligned. */
4172 if (issue_rate > 1
4173 && GET_CODE (PATTERN (insn)) != USE
4174 && GET_CODE (PATTERN (insn)) != CLOBBER
4175 && !DEBUG_INSN_P (insn))
4176 {
4177 if (reload_completed)
4178 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4179 last_clock_var = clock_var;
4180 }
4181
4182 if (nonscheduled_insns_begin != NULL_RTX)
4183 /* Indicate to debug counters that INSN is scheduled. */
4184 nonscheduled_insns_begin = insn;
4185
4186 return advance;
4187}
4188
4189/* Functions for handling of notes. */
4190
4191/* Add note list that ends on FROM_END to the end of TO_ENDP. */
4192void
4193concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4194{
4195 rtx_insn *from_start;
4196
4197 /* It's easy when we have nothing to concat. */
4198 if (from_end == NULL)
4199 return;
4200
4201 /* It's also easy when destination is empty. */
4202 if (*to_endp == NULL)
4203 {
4204 *to_endp = from_end;
4205 return;
4206 }
4207
4208 from_start = from_end;
4209 while (PREV_INSN (from_start) != NULL)
4210 from_start = PREV_INSN (from_start);
4211
4212 SET_PREV_INSN (from_start) = *to_endp;
4213 SET_NEXT_INSN (*to_endp) = from_start;
4214 *to_endp = from_end;
4215}
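
The splice walks FROM's chain back to its first element, then links that element after the old *TO_ENDP. A toy version over a plain doubly-linked node type (an illustrative struct, not GCC's rtx_insn) behaves the same way:

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *prev, *next; int id; };

    /* Append the chain ending at FROM_END onto the chain ending at
       *TO_ENDP, mirroring concat_note_lists.  */
    static void
    concat_chains (struct node *from_end, struct node **to_endp)
    {
      if (from_end == NULL)
        return;
      if (*to_endp == NULL)
        {
          *to_endp = from_end;
          return;
        }
      struct node *from_start = from_end;
      while (from_start->prev != NULL)
        from_start = from_start->prev;
      from_start->prev = *to_endp;
      (*to_endp)->next = from_start;
      *to_endp = from_end;   /* the concatenated list now ends at FROM_END */
    }

    int
    main (void)
    {
      struct node a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };
      struct node *tail = &a;
      concat_chains (&b, &tail);
      assert (tail == &b && b.prev == &a && a.next == &b);
      return 0;
    }
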
4216
4217/* Delete notes between HEAD and TAIL and put them in the chain
4218 of notes ended by NOTE_LIST. */
4219void
4220remove_notes (rtx_insn *head, rtx_insn *tail)
4221{
4222 rtx_insn *next_tail, *insn, *next;
4223
4224 note_list = 0;
4225 if (head == tail && !INSN_P (head))
4226 return;
4227
4228 next_tail = NEXT_INSN (tail);
4229 for (insn = head; insn != next_tail; insn = next)
4230 {
4231 next = NEXT_INSN (insn);
4232 if (!NOTE_P (insn))
4233 continue;
4234
4235 switch (NOTE_KIND (insn))
4236 {
4237 case NOTE_INSN_BASIC_BLOCK:
4238 continue;
4239
4240 case NOTE_INSN_EPILOGUE_BEG:
4241 if (insn != tail)
4242 {
4243 remove_insn (insn);
4244 /* If an insn was split just before the EPILOGUE_BEG note and
4245 that split created new basic blocks, we could have a
4246 BASIC_BLOCK note here. Safely advance over it in that case
4247 and assert that we land on a real insn. */
4248 if (NOTE_P (next)
4249 && NOTE_KIND (next) == NOTE_INSN_BASIC_BLOCK
4250 && next != next_tail)
4251 next = NEXT_INSN (next);
4252 gcc_assert (INSN_P (next));
4253 add_reg_note (next, REG_SAVE_NOTE,
4254 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4255 break;
4256 }
4257 /* FALLTHRU */
4258
4259 default:
4260 remove_insn (insn);
4261
4262 /* Add the note to list that ends at NOTE_LIST. */
4263 SET_PREV_INSN (insn) = note_list;
4264 SET_NEXT_INSN (insn) = NULL_RTX;
4265 if (note_list)
4266 SET_NEXT_INSN (note_list) = insn;
4267 note_list = insn;
4268 break;
4269 }
4270
4271 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4272 }
4273}
4274
4275/* A structure to record enough data to allow us to backtrack the scheduler to
4276 a previous state. */
4277struct haifa_saved_data
4278{
4279 /* Next entry on the list. */
4280 struct haifa_saved_data *next;
4281
4282 /* Backtracking is associated with scheduling insns that have delay slots.
4283 DELAY_PAIR points to the structure that contains the insns involved, and
4284 the number of cycles between them. */
4285 struct delay_pair *delay_pair;
4286
4287 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
4288 void *fe_saved_data;
4289 /* Data used by the backend. */
4290 void *be_saved_data;
4291
4292 /* Copies of global state. */
4293 int clock_var, last_clock_var;
4294 struct ready_list ready;
4295 state_t curr_state;
4296
4297 rtx_insn *last_scheduled_insn;
4298 rtx_insn *last_nondebug_scheduled_insn;
4299 rtx_insn *nonscheduled_insns_begin;
4300 int cycle_issued_insns;
4301
4302 /* Copies of state used in the inner loop of schedule_block. */
4303 struct sched_block_state sched_block;
4304
4305 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4306 to 0 when restoring. */
4307 int q_size;
4308 rtx_insn_list **insn_queue;
4309
4310 /* Describe pattern replacements that occurred since this backtrack point
4311 was queued. */
4312 vec<dep_t> replacement_deps;
4313 vec<int> replace_apply;
4314
4315 /* A copy of the next-cycle replacement vectors at the time of the backtrack
4316 point. */
4317 vec<dep_t> next_cycle_deps;
4318 vec<int> next_cycle_apply;
4319};
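
Entries are chained through NEXT and handled LIFO: save_backtrack_point links a new entry at the head of backtrack_queue, and restore_last_backtrack_point / free_topmost_backtrack_point always consume the head. The discipline in miniature (one payload field standing in for the whole snapshot):

    #include <assert.h>
    #include <stdlib.h>

    /* Reduced to a single field; the real entry also snapshots the ready
       list, the DFA state, the insn queue, and so on.  */
    struct save_point { struct save_point *next; int clock; };
    static struct save_point *backtrack_stack;

    static void
    push_point (int clock)
    {
      struct save_point *save = (struct save_point *) malloc (sizeof *save);
      save->clock = clock;
      save->next = backtrack_stack;  /* save->next = backtrack_queue; */
      backtrack_stack = save;        /* backtrack_queue = save; */
    }

    static int
    pop_point (void)
    {
      struct save_point *save = backtrack_stack;
      int clock = save->clock;
      backtrack_stack = save->next;  /* backtrack_queue = save->next; */
      free (save);
      return clock;
    }

    int
    main (void)
    {
      push_point (10);
      push_point (12);
      assert (pop_point () == 12);  /* most recent snapshot first */
      assert (pop_point () == 10);
      return 0;
    }
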
4320
4321/* A record, in reverse order, of all scheduled insns which have delay slots
4322 and may require backtracking. */
4323static struct haifa_saved_data *backtrack_queue;
4324
4325/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4326 to SET_P. */
4327static void
4328mark_backtrack_feeds (rtx_insn *insn, int set_p)
4329{
4330 sd_iterator_def sd_it;
4331 dep_t dep;
4332 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4333 {
4334 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4335 }
4336}
4337
4338/* Save the current scheduler state so that we can backtrack to it
4339 later if necessary. PAIR gives the insns that make it necessary to
4340 save this point. SCHED_BLOCK is the local state of schedule_block
4341 that needs to be saved. */
4342static void
4343save_backtrack_point (struct delay_pair *pair,
4344 struct sched_block_state sched_block)
4345{
4346 int i;
4347 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4348
4349 save->curr_state = xmalloc (dfa_state_size);
4350 memcpy (save->curr_state, curr_state, dfa_state_size);
4351
4352 save->ready.first = ready.first;
4353 save->ready.n_ready = ready.n_ready;
4354 save->ready.n_debug = ready.n_debug;
4355 save->ready.veclen = ready.veclen;
4356 save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4357 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4358
4359 save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4360 save->q_size = q_size;
4361 for (i = 0; i <= max_insn_queue_index; i++)
4362 {
4363 int q = NEXT_Q_AFTER (q_ptr, i);
4364 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4365 }
4366
4367 save->clock_var = clock_var;
4368 save->last_clock_var = last_clock_var;
4369 save->cycle_issued_insns = cycle_issued_insns;
4370 save->last_scheduled_insn = last_scheduled_insn;
4371 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4372 save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4373
4374 save->sched_block = sched_block;
4375
4376 save->replacement_deps.create (0);
4377 save->replace_apply.create (0);
4378 save->next_cycle_deps = next_cycle_replace_deps.copy ();
4379 save->next_cycle_apply = next_cycle_apply.copy ();
4380
4381 if (current_sched_info->save_state)
4382 save->fe_saved_data = (*current_sched_info->save_state) ();
4383
4384 if (targetm.sched.alloc_sched_context)
4385 {
4386 save->be_saved_data = targetm.sched.alloc_sched_context ();
4387 targetm.sched.init_sched_context (save->be_saved_data, false);
4388 }
4389 else
4390 save->be_saved_data = NULL;
4391
4392 save->delay_pair = pair;
4393
4394 save->next = backtrack_queue;
4395 backtrack_queue = save;
4396
4397 while (pair)
4398 {
4399 mark_backtrack_feeds (pair->i2, 1);
4400 INSN_TICK (pair->i2) = INVALID_TICK;
4401 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4402 SHADOW_P (pair->i2) = pair->stages == 0;
4403 pair = pair->next_same_i1;
4404 }
4405}
4406
4407/* Walk the ready list and all queues. If any insns have unresolved backwards
4408 dependencies, these must be cancelled deps, broken by predication. Set or
4409 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4410
4411static void
4412toggle_cancelled_flags (bool set)
4413{
4414 int i;
4415 sd_iterator_def sd_it;
4416 dep_t dep;
4417
4418 if (ready.n_ready > 0)
4419 {
4420 rtx_insn **first = ready_lastpos (&ready);
4421 for (i = 0; i < ready.n_ready; i++)
4422 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4423 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4424 {
4425 if (set)
4426 DEP_STATUS (dep) |= DEP_CANCELLED;
4427 else
4428 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4429 }
4430 }
4431 for (i = 0; i <= max_insn_queue_index; i++)
4432 {
4433 int q = NEXT_Q_AFTER (q_ptr, i);
4434 rtx_insn_list *link;
4435 for (link = insn_queue[q]; link; link = link->next ())
4436 {
4437 rtx_insn *insn = link->insn ();
4438 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4439 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4440 {
4441 if (set)
4442 DEP_STATUS (dep) |= DEP_CANCELLED;
4443 else
4444 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4445 }
4446 }
4447 }
4448}
4449
4450/* Undo the replacements that have occurred after backtrack point SAVE
4451 was placed. */
4452static void
4453undo_replacements_for_backtrack (struct haifa_saved_data *save)
4454{
4455 while (!save->replacement_deps.is_empty ())
4456 {
4457 dep_t dep = save->replacement_deps.pop ();
4458 int apply_p = save->replace_apply.pop ();
4459
4460 if (apply_p)
4461 restore_pattern (dep, true);
4462 else
4463 apply_replacement (dep, true);
4464 }
4465 save->replacement_deps.release ();
4466 save->replace_apply.release ();
4467}
4468
4469/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4470 Restore their dependencies to an unresolved state, and mark them as
4471 queued nowhere. */
4472
4473static void
4474unschedule_insns_until (rtx_insn *insn)
4475{
4476 auto_vec<rtx_insn *> recompute_vec;
4477
4478 /* Make two passes over the insns to be unscheduled. First, we clear out
4479 dependencies and other trivial bookkeeping. */
4480 for (;;)
4481 {
4482 rtx_insn *last;
4483 sd_iterator_def sd_it;
4484 dep_t dep;
4485
4486 last = scheduled_insns.pop ();
4487
4488 /* This will be changed by restore_backtrack_point if the insn is in
4489 any queue. */
4490 QUEUE_INDEX (last) = QUEUE_NOWHERE;
4491 if (last != insn)
4492 INSN_TICK (last) = INVALID_TICK;
4493
4494 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4495 modulo_insns_scheduled--;
4496
4497 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4498 sd_iterator_cond (&sd_it, &dep);)
4499 {
4500 rtx_insn *con = DEP_CON (dep);
4501 sd_unresolve_dep (sd_it);
4502 if (!MUST_RECOMPUTE_SPEC_P (con))
4503 {
4504 MUST_RECOMPUTE_SPEC_P (con) = 1;
4505 recompute_vec.safe_push (con);
4506 }
4507 }
4508
4509 if (last == insn)
4510 break;
4511 }
4512
4513 /* A second pass, to update ready and speculation status for insns
4514 depending on the unscheduled ones. The first pass must have
4515 popped the scheduled_insns vector up to the point where we
4516 restart scheduling, as recompute_todo_spec requires it to be
4517 up-to-date. */
4518 while (!recompute_vec.is_empty ())
4519 {
4520 rtx_insn *con;
4521
4522 con = recompute_vec.pop ();
4523 MUST_RECOMPUTE_SPEC_P (con) = 0;
4524 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4525 {
4526 TODO_SPEC (con) = HARD_DEP;
4527 INSN_TICK (con) = INVALID_TICK;
4528 if (PREDICATED_PAT (con) != NULL_RTX)
4529 haifa_change_pattern (con, ORIG_PAT (con));
4530 }
4531 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4532 TODO_SPEC (con) = recompute_todo_spec (con, true);
4533 }
4534}
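
The MUST_RECOMPUTE_SPEC_P flag is what keeps the second pass linear: each consumer enters recompute_vec at most once, however many unresolved deps point at it. The same dedup-then-drain shape in miniature (a toy worklist, not the scheduler's data structures):

    #include <stdbool.h>
    #include <stdio.h>

    enum { N = 4 };
    static bool must_recompute[N];
    static int worklist[N], worklist_len;

    static void
    mark (int con)
    {
      if (!must_recompute[con])
        {
          must_recompute[con] = true;      /* MUST_RECOMPUTE_SPEC_P (con) = 1 */
          worklist[worklist_len++] = con;  /* recompute_vec.safe_push (con) */
        }
    }

    int
    main (void)
    {
      mark (2); mark (2); mark (3);        /* the duplicate is filtered out */
      while (worklist_len > 0)             /* second pass drains the vector */
        {
          int con = worklist[--worklist_len];
          must_recompute[con] = false;
          printf ("recompute %d\n", con);
        }
      return 0;
    }
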
4535
4536/* Restore scheduler state from the topmost entry on the backtracking queue.
4537 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4538 overwrite with the saved data.
4539 The caller must already have called unschedule_insns_until. */
4540
4541static void
4542restore_last_backtrack_point (struct sched_block_state *psched_block)
4543{
4544 int i;
4545 struct haifa_saved_data *save = backtrack_queue;
4546
4547 backtrack_queue = save->next;
4548
4549 if (current_sched_info->restore_state)
4550 (*current_sched_info->restore_state) (save->fe_saved_data);
4551
4552 if (targetm.sched.alloc_sched_context)
4553 {
4554 targetm.sched.set_sched_context (save->be_saved_data);
4555 targetm.sched.free_sched_context (save->be_saved_data);
4556 }
4557
4558 /* Do this first since it clobbers INSN_TICK of the involved
4559 instructions. */
4560 undo_replacements_for_backtrack (save);
4561
4562 /* Clear the QUEUE_INDEX of everything in the ready list or one
4563 of the queues. */
4564 if (ready.n_ready > 0)
4565 {
4566 rtx_insn **first = ready_lastpos (&ready);
4567 for (i = 0; i < ready.n_ready; i++)
4568 {
4569 rtx_insn *insn = first[i];
4570 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4571 INSN_TICK (insn) = INVALID_TICK;
4572 }
4573 }
4574 for (i = 0; i <= max_insn_queue_index; i++)
4575 {
4576 int q = NEXT_Q_AFTER (q_ptr, i);
4577
4578 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4579 {
4580 rtx_insn *x = link->insn ();
4581 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4582 INSN_TICK (x) = INVALID_TICK;
4583 }
4584 free_INSN_LIST_list (&insn_queue[q]);
4585 }
4586
4587 free (ready.vec);
4588 ready = save->ready;
4589
4590 if (ready.n_ready > 0)
4591 {
4592 rtx_insn **first = ready_lastpos (&ready);
4593 for (i = 0; i < ready.n_ready; i++)
4594 {
4595 rtx_insn *insn = first[i];
4596 QUEUE_INDEX (insn) = QUEUE_READY;
4597 TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4598 INSN_TICK (insn) = save->clock_var;
4599 }
4600 }
4601
4602 q_ptr = 0;
4603 q_size = save->q_size;
4604 for (i = 0; i <= max_insn_queue_index; i++)
4605 {
4606 int q = NEXT_Q_AFTER (q_ptr, i);
4607
4608 insn_queue[q] = save->insn_queue[q];
4609
4610 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4611 {
4612 rtx_insn *x = link->insn ();
4613 QUEUE_INDEX (x) = i;
4614 TODO_SPEC (x) = recompute_todo_spec (x, true);
4615 INSN_TICK (x) = save->clock_var + i;
4616 }
4617 }
4618 free (save->insn_queue);
4619
4620 toggle_cancelled_flags (true);
4621
4622 clock_var = save->clock_var;
4623 last_clock_var = save->last_clock_var;
4624 cycle_issued_insns = save->cycle_issued_insns;
4625 last_scheduled_insn = save->last_scheduled_insn;
4626 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4627 nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4628
4629 *psched_block = save->sched_block;
4630
4631 memcpy (curr_state, save->curr_state, dfa_state_size);
4632 free (save->curr_state);
4633
4634 mark_backtrack_feeds (save->delay_pair->i2, 0);
4635
4636 gcc_assert (next_cycle_replace_deps.is_empty ());
4637 next_cycle_replace_deps = save->next_cycle_deps.copy ();
4638 next_cycle_apply = save->next_cycle_apply.copy ();
4639
4640 free (save);
4641
4642 for (save = backtrack_queue; save; save = save->next)
4643 {
4644 mark_backtrack_feeds (save->delay_pair->i2, 1);
4645 }
4646}
4647
4648/* Discard all data associated with the topmost entry in the backtrack
4649 queue. If RESET_TICK is false, we just want to free the data. If true,
4650 we are doing this because we discovered a reason to backtrack. In the
4651 latter case, also reset the INSN_TICK for the shadow insn. */
4652static void
4653free_topmost_backtrack_point (bool reset_tick)
4654{
4655 struct haifa_saved_data *save = backtrack_queue;
4656 int i;
4657
4658 backtrack_queue = save->next;
4659
4660 if (reset_tick)
4661 {
4662 struct delay_pair *pair = save->delay_pair;
4663 while (pair)
4664 {
4665 INSN_TICK (pair->i2) = INVALID_TICK;
4666 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4667 pair = pair->next_same_i1;
4668 }
4669 undo_replacements_for_backtrack (save);
4670 }
4671 else
4672 {
4673 save->replacement_deps.release ();
4674 save->replace_apply.release ();
4675 }
4676
4677 if (targetm.sched.free_sched_context)
4678 targetm.sched.free_sched_context (save->be_saved_data);
4679 if (current_sched_info->restore_state)
4680 free (save->fe_saved_data);
4681 for (i = 0; i <= max_insn_queue_index; i++)
4682 free_INSN_LIST_list (&save->insn_queue[i]);
4683 free (save->insn_queue);
4684 free (save->curr_state);
4685 free (save->ready.vec);
4686 free (save);
4687}
4688
4689/* Free the entire backtrack queue. */
4690static void
4691free_backtrack_queue (void)
4692{
4693 while (backtrack_queue)
4694 free_topmost_backtrack_point (false);
4695}
4696
4697/* Apply a replacement described by DESC. If IMMEDIATELY is false, we
4698 may have to postpone the replacement until the start of the next cycle,
4699 at which point we will be called again with IMMEDIATELY true. This is
4700 only done for machines which have instruction packets with explicit
4701 parallelism, however. */
4702static void
4703apply_replacement (dep_t dep, bool immediately)
4704{
4705 struct dep_replacement *desc = DEP_REPLACE (dep);
4706 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4707 {
4708 next_cycle_replace_deps.safe_push (dep);
4709 next_cycle_apply.safe_push (1);
4710 }
4711 else
4712 {
4713 bool success;
4714
4715 if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4716 return;
4717
4718 if (sched_verbose >= 5)
4719 fprintf (sched_dump, "applying replacement for insn %d\n",
4720 INSN_UID (desc->insn));
4721
4722 success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4723 gcc_assert (success);
4724
4725 rtx_insn *insn = DEP_PRO (dep);
4726
4727 /* Recompute priority since dependent priorities may have changed. */
4728 priority (insn, true);
4729 update_insn_after_change (desc->insn);
4730
4731 if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4732 fix_tick_ready (desc->insn);
4733
4734 if (backtrack_queue != NULL)
4735 {
4736 backtrack_queue->replacement_deps.safe_push (dep);
4737 backtrack_queue->replace_apply.safe_push (1);
4738 }
4739 }
4740}
4741
4742/* We have determined that a pattern involved in DEP must be restored.
4743 If IMMEDIATELY is false, we may have to postpone the replacement
4744 until the start of the next cycle, at which point we will be called
4745 again with IMMEDIATELY true. */
4746static void
4747restore_pattern (dep_t dep, bool immediately)
4748{
4749 rtx_insn *next = DEP_CON (dep);
4750 int tick = INSN_TICK (next);
4751
4752 /* If we already scheduled the insn, the modified version is
4753 correct. */
4754 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4755 return;
4756
4757 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4758 {
4759 next_cycle_replace_deps.safe_push (dep);
4760 next_cycle_apply.safe_push (0);
4761 return;
4762 }
4763
4764
4765 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4766 {
4767 if (sched_verbose >= 5)
4768 fprintf (sched_dump, "restoring pattern for insn %d\n",
4769 INSN_UID (next));
4770 haifa_change_pattern (next, ORIG_PAT (next));
4771 }
4772 else
4773 {
4774 struct dep_replacement *desc = DEP_REPLACE (dep);
4775 bool success;
4776
4777 if (sched_verbose >= 5)
4778 fprintf (sched_dump, "restoring pattern for insn %d\n",
4779 INSN_UID (desc->insn));
4780 tick = INSN_TICK (desc->insn);
4781
4782 success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4783 gcc_assert (success);
4784
4785 rtx_insn *insn = DEP_PRO (dep);
4786
4787 if (QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4788 {
4789 /* Recompute priority since dependent priorities may have changed. */
4790 priority (insn, true);
4791 }
4792
4793 update_insn_after_change (desc->insn);
4794
4795 if (backtrack_queue != NULL)
4796 {
4797 backtrack_queue->replacement_deps.safe_push (dep);
4798 backtrack_queue->replace_apply.safe_push (0);
4799 }
4800 }
4801 INSN_TICK (next) = tick;
4802 if (TODO_SPEC (next) == DEP_POSTPONED)
4803 return;
4804
4805 if (sd_lists_empty_p (next, SD_LIST_BACK))
4806 TODO_SPEC (next) = 0;
4807 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4808 TODO_SPEC (next) = HARD_DEP;
4809}
4810
4811/* Perform pattern replacements that were queued up until the next
4812 cycle. */
4813static void
4814perform_replacements_new_cycle (void)
4815{
4816 int i;
4817 dep_t dep;
4818 FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4819 {
4820 int apply_p = next_cycle_apply[i];
4821 if (apply_p)
4822 apply_replacement (dep, true);
4823 else
4824 restore_pattern (dep, true);
4825 }
4826 next_cycle_replace_deps.truncate (0);
4827 next_cycle_apply.truncate (0);
4828}
4829
4830/* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
4831 instructions we've previously encountered, a set bit prevents
4832 recursion. BUDGET is a limit on how far ahead we look, it is
4833 reduced on recursive calls. Return true if we produced a good
4834 estimate, or false if we exceeded the budget. */
4835static bool
4836estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4837{
4838 sd_iterator_def sd_it;
4839 dep_t dep;
4840 int earliest = INSN_TICK (insn);
4841
4842 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4843 {
4844 rtx_insn *pro = DEP_PRO (dep);
4845 int t;
4846
4847 if (DEP_STATUS (dep) & DEP_CANCELLED)
4848 continue;
4849
4850 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4851 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4852 else
4853 {
4854 int cost = dep_cost (dep);
4855 if (cost >= budget)
4856 return false;
4857 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4858 {
4859 if (!estimate_insn_tick (processed, pro, budget - cost))
4860 return false;
4861 }
4862 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4863 t = INSN_TICK_ESTIMATE (pro) + cost;
4864 if (earliest == INVALID_TICK || t > earliest)
4865 earliest = t;
4866 }
4867 }
4868 bitmap_set_bit (processed, INSN_LUID (insn));
4869 INSN_TICK_ESTIMATE (insn) = earliest;
4870 return true;
4871}
4872
4873/* Examine the pair of insns in P, and estimate (optimistically, assuming
4874 infinite resources) the cycle in which the delayed shadow can be issued.
4875 Return the number of cycles that must pass before the real insn can be
4876 issued in order to meet this constraint. */
4877static int
4878estimate_shadow_tick (struct delay_pair *p)
4879{
4880 auto_bitmap processed;
4881 int t;
4882 bool cutoff;
4883
4884 cutoff = !estimate_insn_tick (processed, p->i2,
4885 max_insn_queue_index + pair_delay (p));
4886 if (cutoff)
4887 return max_insn_queue_index;
4888 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4889 if (t > 0)
4890 return t;
4891 return 0;
4892}
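
Concretely (illustrative numbers only): with clock_var = 10 and a pair delay of 3, a shadow estimated to issue at tick 16 forces the real insn to wait 16 - (10 + 3 + 1) = 2 cycles, while an estimate of 14 or below yields no stall:

    #include <stdio.h>

    int
    main (void)
    {
      int clock_var = 10, delay = 3;
      int estimate = 16;  /* optimistic tick for the delayed shadow */
      int t = estimate - (clock_var + delay + 1);
      printf ("%d\n", t > 0 ? t : 0);  /* prints 2 */
      return 0;
    }
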
4893
4894/* If INSN has no unresolved backwards dependencies, add it to the schedule and
4895 recursively resolve all its forward dependencies. */
4896static void
4897resolve_dependencies (rtx_insn *insn)
4898{
4899 sd_iterator_def sd_it;
4900 dep_t dep;
4901
4902 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4903 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4904 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4905 return;
4906
4907 if (sched_verbose >= 4)
4908 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4909
4910 if (QUEUE_INDEX (insn) >= 0)
4911 queue_remove (insn);
4912
4913 scheduled_insns.safe_push (insn);
4914
4915 /* Update dependent instructions. */
4916 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4917 sd_iterator_cond (&sd_it, &dep);)
4918 {
4919 rtx_insn *next = DEP_CON (dep);
4920
4921 if (sched_verbose >= 4)
4922 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4923 INSN_UID (next));
4924
4925 /* Resolve the dependence between INSN and NEXT.
4926 sd_resolve_dep () moves current dep to another list thus
4927 advancing the iterator. */
4928 sd_resolve_dep (sd_it);
4929
4930 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4931 {
4932 resolve_dependencies (next);
4933 }
4934 else
4935 /* Check always has only one forward dependence (to the first insn in
4936 the recovery block), therefore, this will be executed only once. */
4937 {
4938 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4939 }
4940 }
4941}
4942
4943
4944/* Return the head and tail pointers of ebb starting at BEG and ending
4945 at END. */
4946void
4947get_ebb_head_tail (basic_block beg, basic_block end,
4948 rtx_insn **headp, rtx_insn **tailp)
4949{
4950 rtx_insn *beg_head = BB_HEAD (beg);
4951 rtx_insn *beg_tail = BB_END (beg);
4952 rtx_insn *end_head = BB_HEAD (end);
4953 rtx_insn *end_tail = BB_END (end);
4954
4955 /* Don't include any notes or labels at the beginning of the BEG
4956 basic block, or notes at the end of the END basic block. */
4957
4958 if (LABEL_P (beg_head))
4959 beg_head = NEXT_INSN (beg_head);
4960
4961 while (beg_head != beg_tail)
4962 if (NOTE_P (beg_head))
4963 beg_head = NEXT_INSN (beg_head);
4964 else if (DEBUG_INSN_P (beg_head))
4965 {
4966 rtx_insn * note, *next;
4967
4968 for (note = NEXT_INSN (beg_head);
4969 note != beg_tail;
4970 note = next)
4971 {
4972 next = NEXT_INSN (note);
4973 if (NOTE_P (note))
4974 {
4975 if (sched_verbose >= 9)
4976 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4977
4978 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4979
4980 if (BLOCK_FOR_INSN (note) != beg)
4981 df_insn_change_bb (note, beg);
4982 }
4983 else if (!DEBUG_INSN_P (note))
4984 break;
4985 }
4986
4987 break;
4988 }
4989 else
4990 break;
4991
4992 *headp = beg_head;
4993
4994 if (beg == end)
4995 end_head = beg_head;
4996 else if (LABEL_P (end_head))
4997 end_head = NEXT_INSN (end_head);
4998
4999 while (end_head != end_tail)
5000 if (NOTE_P (end_tail))
5001 end_tail = PREV_INSN (end_tail);
5002 else if (DEBUG_INSN_P (end_tail))
5003 {
5004 rtx_insn * note, *prev;
5005
5006 for (note = PREV_INSN (end_tail);
5007 note != end_head;
5008 note = prev)
5009 {
5010 prev = PREV_INSN (note);
5011 if (NOTE_P (note))
5012 {
5013 if (sched_verbose >= 9)
5014 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
5015
5016 reorder_insns_nobb (note, note, end_tail);
5017
5018 if (end_tail == BB_END (end))
5019 BB_END (end) = note;
5020
5021 if (BLOCK_FOR_INSN (note) != end)
5022 df_insn_change_bb (note, end);
5023 }
5024 else if (!DEBUG_INSN_P (note))
5025 break;
5026 }
5027
5028 break;
5029 }
5030 else
5031 break;
5032
5033 *tailp = end_tail;
5034}
5035
5036/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
5037
5038int
5039no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
5040{
5041 while (head != NEXT_INSN (tail))
5042 {
5043 if (!NOTE_P (head) && !LABEL_P (head))
5044 return 0;
5045 head = NEXT_INSN (head);
5046 }
5047 return 1;
5048}
5049
5050/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
5051 previously found among the insns. Insert them just before HEAD. */
5052rtx_insn *
5053restore_other_notes (rtx_insn *head, basic_block head_bb)
5054{
5055 if (note_list != 0)
5056 {
5057 rtx_insn *note_head = note_list;
5058
5059 if (head)
5060 head_bb = BLOCK_FOR_INSN (head);
5061 else
5062 head = NEXT_INSN (bb_note (head_bb));
5063
5064 while (PREV_INSN (note_head))
5065 {
5066 set_block_for_insn (note_head, head_bb);
5067 note_head = PREV_INSN (note_head);
5068 }
5069 /* In the loop above we've missed this note. */
5070 set_block_for_insn (note_head, head_bb);
5071
5072 SET_PREV_INSN (note_head) = PREV_INSN (head);
5073 SET_NEXT_INSN (PREV_INSN (head)) = note_head;
5074 SET_PREV_INSN (head) = note_list;
5075 SET_NEXT_INSN (note_list) = head;
5076
5077 if (BLOCK_FOR_INSN (head) != head_bb)
5078 BB_END (head_bb) = note_list;
5079
5080 head = note_head;
5081 }
5082
5083 return head;
5084}
5085
5086/* When we know we are going to discard the schedule due to a failed attempt
5087 at modulo scheduling, undo all replacements. */
5088static void
5089undo_all_replacements (void)
5090{
5091 rtx_insn *insn;
5092 int i;
5093
5094 FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
5095 {
5096 sd_iterator_def sd_it;
5097 dep_t dep;
5098
5099 /* See if we must undo a replacement. */
5100 for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
5101 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
5102 {
5103 struct dep_replacement *desc = DEP_REPLACE (dep);
5104 if (desc != NULL)
5105 validate_change (desc->insn, desc->loc, desc->orig, 0);
5106 }
5107 }
5108}
5109
5110/* Return first non-scheduled insn in the current scheduling block.
5111 This is mostly used for debug-counter purposes. */
5112static rtx_insn *
5113first_nonscheduled_insn (void)
5114{
5115 rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5116 ? nonscheduled_insns_begin
5117 : current_sched_info->prev_head);
5118
5119 do
5120 {
5121 insn = next_nonnote_nondebug_insn (insn);
5122 }
5123 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5124
5125 return insn;
5126}
5127
5128/* Move insns that became ready to fire from queue to ready list. */
5129
5130static void
5131queue_to_ready (struct ready_list *ready)
5132{
5133 rtx_insn *insn;
5134 rtx_insn_list *link;
5135 rtx_insn *skip_insn;
5136
5137 q_ptr = NEXT_Q (q_ptr);
5138
5139 if (dbg_cnt (sched_insn) == false)
5140 /* If debug counter is activated do not requeue the first
5141 nonscheduled insn. */
5142 skip_insn = first_nonscheduled_insn ();
5143 else
5144 skip_insn = NULL;
5145
5146 /* Add all pending insns that can be scheduled without stalls to the
5147 ready list. */
5148 for (link = insn_queue[q_ptr]; link; link = link->next ())
5149 {
5150 insn = link->insn ();
5151 q_size -= 1;
5152
5153 if (sched_verbose >= 2)
5154 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5155 (*current_sched_info->print_insn) (insn, 0));
5156
5157 /* If the ready list is full, delay the insn for 1 cycle.
5158 See the comment in schedule_block for the rationale. */
5159 if (!reload_completed
5160 && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
5161 || (sched_pressure == SCHED_PRESSURE_MODEL
5162 /* Limit pressure recalculations to
5163 param_max_sched_ready_insns instructions too. */
5164 && model_index (insn) > (model_curr_point
5165 + param_max_sched_ready_insns)))
5166 && !(sched_pressure == SCHED_PRESSURE_MODEL
5167 && model_curr_point < model_num_insns
5168 /* Always allow the next model instruction to issue. */
5169 && model_index (insn) == model_curr_point)
5170 && !SCHED_GROUP_P (insn)
5171 && insn != skip_insn)
5172 {
5173 if (sched_verbose >= 2)
5174 fprintf (sched_dump, "keeping in queue, ready full\n");
5175 queue_insn (insn, 1, "ready full");
5176 }
5177 else
5178 {
5179 ready_add (ready, insn, false);
5180 if (sched_verbose >= 2)
5181 fprintf (sched_dump, "moving to ready without stalls\n");
5182 }
5183 }
5184 free_INSN_LIST_list (&insn_queue[q_ptr]);
5185
5186 /* If there are no ready insns, stall until one is ready and add all
5187 of the pending insns at that point to the ready list. */
5188 if (ready->n_ready == 0)
5189 {
5190 int stalls;
5191
5192 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5193 {
5194 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5195 {
5196 for (; link; link = link->next ())
5197 {
5198 insn = link->insn ();
5199 q_size -= 1;
5200
5201 if (sched_verbose >= 2)
5202 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5203 (*current_sched_info->print_insn) (insn, 0));
5204
5205 ready_add (ready, insn, false);
5206 if (sched_verbose >= 2)
5207 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5208 }
5209 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5210
5211 advance_one_cycle ();
5212
5213 break;
5214 }
5215
5216 advance_one_cycle ();
5217 }
5218
5219 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5220 clock_var += stalls;
5221 if (sched_verbose >= 2)
5222 fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5223 stalls, clock_var);
5224 }
5225}
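
The queue indexing relies on max_insn_queue_index being a power-of-two mask (2^n - 1), so NEXT_Q and NEXT_Q_AFTER wrap around with a single AND. A standalone sketch with an assumed queue of eight slots:

    #include <assert.h>

    enum { MAX_Q_INDEX = 7 };  /* 2^3 - 1, playing the role of max_insn_queue_index */

    static int next_q (int q)              { return (q + 1) & MAX_Q_INDEX; }
    static int next_q_after (int q, int n) { return (q + n) & MAX_Q_INDEX; }

    int
    main (void)
    {
      assert (next_q (7) == 0);           /* wraps around to slot 0 */
      assert (next_q_after (5, 4) == 1);  /* (5 + 4) mod 8 */
      return 0;
    }
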
5226
5227/* Used by early_queue_to_ready. Determines whether it is "ok" to
5228 prematurely move INSN from the queue to the ready list. Currently,
5229 if a target defines the hook 'is_costly_dependence', this function
5230 uses the hook to check whether there exist any dependences which are
5231 considered costly by the target, between INSN and other insns that
5232 have already been scheduled. Dependences are checked up to Y cycles
5233 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
5234 controlling this value.
5235 (Other considerations could be taken into account instead (or in
5236 addition) depending on user flags and target hooks.) */
5237
5238static bool
5239ok_for_early_queue_removal (rtx_insn *insn)
5240{
5241 if (targetm.sched.is_costly_dependence)
5242 {
5243 int n_cycles;
5244 int i = scheduled_insns.length ();
5245 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5246 {
5247 while (i-- > 0)
5248 {
5249 int cost;
5250
5251 rtx_insn *prev_insn = scheduled_insns[i];
5252
5253 if (!NOTE_P (prev_insn))
5254 {
5255 dep_t dep;
5256
5257 dep = sd_find_dep_between (prev_insn, insn, true);
5258
5259 if (dep != NULL)
5260 {
5261 cost = dep_cost (dep);
5262
5263 if (targetm.sched.is_costly_dependence (dep, cost,
5264 flag_sched_stalled_insns_dep - n_cycles))
5265 return false;
5266 }
5267 }
5268
5269 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5270 break;
5271 }
5272
5273 if (i == 0)
5274 break;
5275 }
5276 }
5277
5278 return true;
5279}
5280
5281
5282/* Remove insns from the queue, before they become "ready" with respect
5283 to FU latency considerations. */
5284
5285static int
5286early_queue_to_ready (state_t state, struct ready_list *ready)
5287{
5288 rtx_insn *insn;
5289 rtx_insn_list *link;
5290 rtx_insn_list *next_link;
5291 rtx_insn_list *prev_link;
5292 bool move_to_ready;
5293 int cost;
5294 state_t temp_state = alloca (dfa_state_size);
5295 int stalls;
5296 int insns_removed = 0;
5297
5298 /*
5299 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5300 function:
5301
5302 X == 0: There is no limit on how many queued insns can be removed
5303 prematurely. (flag_sched_stalled_insns = -1).
5304
5305 X >= 1: Only X queued insns can be removed prematurely in each
5306 invocation. (flag_sched_stalled_insns = X).
5307
5308 Otherwise: Early queue removal is disabled.
5309 (flag_sched_stalled_insns = 0)
5310 */
5311
5312 if (! flag_sched_stalled_insns)
5313 return 0;
5314
5315 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5316 {
5317 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5318 {
5319 if (sched_verbose > 6)
5320 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5321
5322 prev_link = 0;
5323 while (link)
5324 {
5325 next_link = link->next ();
5326 insn = link->insn ();
5327 if (insn && sched_verbose > 6)
5328 print_rtl_single (sched_dump, insn);
5329
5330 memcpy (temp_state, state, dfa_state_size);
5331 if (recog_memoized (insn) < 0)
5332 /* non-negative to indicate that it's not ready
5333 to avoid infinite Q->R->Q->R... */
5334 cost = 0;
5335 else
5336 cost = state_transition (temp_state, insn);
5337
5338 if (sched_verbose >= 6)
5339 fprintf (sched_dump, "transition cost = %d\n", cost);
5340
5341 move_to_ready = false;
5342 if (cost < 0)
5343 {
5344 move_to_ready = ok_for_early_queue_removal (insn);
5345 if (move_to_ready == true)
5346 {
5347 /* move from Q to R */
5348 q_size -= 1;
5349 ready_add (ready, insn, false);
5350
5351 if (prev_link)
5352 XEXP (prev_link, 1) = next_link;
5353 else
5354 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5355
5356 free_INSN_LIST_node (link);
5357
5358 if (sched_verbose >= 2)
5359 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5360 (*current_sched_info->print_insn) (insn, 0));
5361
5362 insns_removed++;
5363 if (insns_removed == flag_sched_stalled_insns)
5364 /* Remove no more than flag_sched_stalled_insns insns
5365 from Q at a time. */
5366 return insns_removed;
5367 }
5368 }
5369
5370 if (move_to_ready == false)
5371 prev_link = link;
5372
5373 link = next_link;
5374 } /* while link */
5375 } /* if link */
5376
5377 } /* for stalls.. */
5378
5379 return insns_removed;
5380}
5381
5382
5383/* Print the ready list for debugging purposes.
5384 If READY_TRY is non-zero then only print insns that max_issue
5385 will consider. */
5386static void
5387debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5388{
5389 rtx_insn **p;
5390 int i;
5391
5392 if (ready->n_ready == 0)
5393 {
5394 fprintf (sched_dump, "\n");
5395 return;
5396 }
5397
5398 p = ready_lastpos (ready);
5399 for (i = 0; i < ready->n_ready; i++)
5400 {
5401 if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5402 continue;
5403
5404 fprintf (sched_dump, " %s:%d",
5405 (*current_sched_info->print_insn) (p[i], 0),
5406 INSN_LUID (p[i]));
5407 if (sched_pressure != SCHED_PRESSURE_NONE)
5408 fprintf (sched_dump, "(cost=%d",
5409 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5410 fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5411 if (INSN_TICK (p[i]) > clock_var)
5412 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5413 if (sched_pressure == SCHED_PRESSURE_MODEL)
5414 fprintf (sched_dump, ":idx=%d",
5415 model_index (p[i]));
5416 if (sched_pressure != SCHED_PRESSURE_NONE)
5417 fprintf (sched_dump, ")");
5418 }
5419 fprintf (sched_dump, "\n");
5420}
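Reading the format strings off debug_ready_list_1: each entry prints as " name:luid", ":prio=" is always appended, ":delay=" appears only while the insn's tick is still ahead of the clock, and the "(cost=...)" wrapper only under register-pressure scheduling. A hypothetical entry (insn name, luid, and values invented), assuming sched_pressure is enabled but is not SCHED_PRESSURE_MODEL:

 42:17(cost=0:prio=3:delay=1)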
5421
5422/* Print the ready list. Callable from debugger. */
5423static void
5424debug_ready_list (struct ready_list *ready)
5425{
5426 debug_ready_list_1 (ready, NULL);
5427}
5428
5429/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5430 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5431 replaces the epilogue note in the correct basic block. */
5432void
5433reemit_notes (rtx_insn *insn)
5434{
5435 rtx note;
5436 rtx_insn *last = insn;
5437
5438 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5439 {
5440 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5441 {
5442 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5443
5444 last = emit_note_before (note_type, last);
5445 remove_note (insn, note);
5446 df_insn_create_insn_record (last);
5447 }
5448 }
5449}
5450
5451/* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5452static void
5453move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5454{
5455 if (PREV_INSN (insn) != last)
5456 {
5457 basic_block bb;
5458 rtx_insn *note;
5459 int jump_p = 0;
5460
5461 bb = BLOCK_FOR_INSN (insn);
5462
5463 /* BB_HEAD is either LABEL or NOTE. */
5464 gcc_assert (BB_HEAD (bb) != insn);
5465
5466 if (BB_END (bb) == insn)
5467 /* If this is last instruction in BB, move end marker one
5468 instruction up. */
5469 {
5470 /* Jumps are always placed at the end of basic block. */
5471 jump_p = control_flow_insn_p (insn);
5472
5473 gcc_assert (!jump_p
5474 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5475 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5476 || (common_sched_info->sched_pass_id
5477 == SCHED_EBB_PASS));
5478
5479 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5480
5481 BB_END (bb) = PREV_INSN (insn);
5482 }
5483
5484 gcc_assert (BB_END (bb) != last);
5485
5486 if (jump_p)
5487 /* We move the block note along with jump. */
5488 {
5489 gcc_assert (nt);
5490
5491 note = NEXT_INSN (insn);
5492 while (NOTE_NOT_BB_P (note) && note != nt)
5493 note = NEXT_INSN (note);
5494
5495 if (note != nt
5496 && (LABEL_P (note)
5497 || BARRIER_P (note)))
5498 note = NEXT_INSN (note);
5499
5500 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5501 }
5502 else
5503 note = insn;
5504
5505 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5506 SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5507
5508 SET_NEXT_INSN (note) = NEXT_INSN (last);
5509 SET_PREV_INSN (NEXT_INSN (last)) = note;
5510
5511 SET_NEXT_INSN (last) = insn;
5512 SET_PREV_INSN (insn) = last;
5513
5514 bb = BLOCK_FOR_INSN (last);
5515
5516 if (jump_p)
5517 {
5518 fix_jump_move (insn);
5519
5520 if (BLOCK_FOR_INSN (insn) != bb)
5521 move_block_after_check (insn);
5522
5523 gcc_assert (BB_END (bb) == last);
5524 }
5525
5526 df_insn_change_bb (insn, bb);
5527
5528 /* Update BB_END, if needed. */
5529 if (BB_END (bb) == last)
5530 BB_END (bb) = insn;
5531 }
5532
5533 SCHED_GROUP_P (insn) = 0;
5534}
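The six SET_NEXT_INSN / SET_PREV_INSN stores at source lines 5505-5512 are a plain doubly-linked-list splice: detach the run [insn..note] from its old position, then reinsert it after LAST. A self-contained sketch of the same pointer surgery on a generic node type (the type and function names are invented for the example):

/* Illustrative sketch only -- mirrors the splice in move_insn, not GCC code.  */
#include <cassert>

struct node
{
  node *prev, *next;
};

/* Move the run [FIRST..LAST_OF_RUN] so that it follows AFTER, in the same
   store order as source lines 5505-5512.  Assumes every pointer touched is
   non-null, as move_insn's asserts guarantee in the real code.  */
static void
splice_after (node *after, node *first, node *last_of_run)
{
  first->prev->next = last_of_run->next;   /* detach: cf. line 5505 */
  last_of_run->next->prev = first->prev;   /* detach: cf. line 5506 */

  last_of_run->next = after->next;         /* reattach: cf. line 5508 */
  after->next->prev = last_of_run;         /* cf. line 5509 */
  after->next = first;                     /* cf. line 5511 */
  first->prev = after;                     /* cf. line 5512 */
}

int
main ()
{
  /* Circular list a <-> b <-> c <-> d; move the run [b..c] after d.  */
  node a, b, c, d;
  a = {&d, &b}; b = {&a, &c}; c = {&b, &d}; d = {&c, &a};
  splice_after (&d, &b, &c);
  assert (a.next == &d && d.prev == &a);   /* run detached */
  assert (d.next == &b && c.next == &a);   /* run now follows d */
  return 0;
}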
5535
5536/* Return true if scheduling INSN will finish current clock cycle. */
5537static bool
5538insn_finishes_cycle_p (rtx_insn *insn)
5539{
5540 if (SCHED_GROUP_P (insn))
5541 /* After issuing INSN, rest of the sched_group will be forced to issue
5542 in order. Don't make any plans for the rest of cycle. */
5543 return true;
5544
5545 /* Finishing the block will, apparently, finish the cycle. */
5546 if (current_sched_info->insn_finishes_block_p
5547 && current_sched_info->insn_finishes_block_p (insn))
5548 return true;
5549
5550 return false;
5551}
5552
5553/* Helper for autopref_multipass_init. Given a SET in PAT and whether
5554 we're expecting a memory WRITE or not, check that the insn is relevant to
5555 the autoprefetcher modelling code. Return true iff that is the case.
5556 If it is relevant, record the base register of the memory op in BASE and
5557 the offset in OFFSET. */
5558
5559static bool
5560analyze_set_insn_for_autopref (rtx pat, bool write, rtx *base, int *offset)
5561{
5562 if (GET_CODE (pat) != SET)
5563 return false;
5564
5565 rtx mem = write ? SET_DEST (pat) : SET_SRC (pat);
5566 if (!MEM_P (mem))
5567 return false;
5568
5569 struct address_info info;
5570 decompose_mem_address (&info, mem);
5571
5572 /* TODO: Currently only (base+const) addressing is supported. */
5573 if (info.base == NULL || !REG_P (*info.base)
5574 || (info.disp != NULL && !CONST_INT_P (*info.disp)))
5575 return false;
5576
5577 *base = *info.base;
5578 *offset = info.disp ? INTVAL (*info.disp) : 0;
5579 return true;
5580}
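The TODO above restricts the model to (base + const) addressing. A few invented SET patterns, classified for a read (write == false) by the checks just shown:

(set (reg r2) (mem (plus (reg r1) (const_int 8))))  -> relevant: *base = r1, *offset = 8
(set (reg r2) (mem (reg r1)))                       -> relevant: *base = r1, *offset = 0
(set (reg r2) (reg r1))                             -> not relevant: the source is not a MEM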
5581
5582/* Functions to model cache auto-prefetcher.
5583
5584 Some CPUs have a cache auto-prefetcher, which /seems/ to initiate
5585 memory prefetches if it sees instructions with consecutive memory accesses
5586 in the instruction stream. Details of such hardware units are not published,
5587 so we can only guess what exactly is going on there.
5588 In the scheduler, we model an abstract auto-prefetcher. If there are memory
5589 insns in the ready list (or the queue) that have the same memory base, but
5590 different offsets, then we delay the insns with larger offsets until insns
5591 with smaller offsets get scheduled. If PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
5592 is "1", then we look at the ready list; if it is N>1, then we also look
5593 through N-1 queue entries.
5594 If the param is N>=0, then rank_for_schedule will consider auto-prefetching
5595 among its heuristics.
5596 A param value of "-1" disables modelling of the auto-prefetcher. */
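A minimal sketch of the delay test this model boils down to (compare autopref_multipass_dfa_lookahead_guard_1 below). The struct and names are invented for illustration, and base registers are compared by number here, whereas the real code compares base rtxes with rtx_equal_p:

/* Illustrative sketch only, not part of haifa-sched.cc.  */
#include <cstdio>

/* Stand-in for the (base, offset) pair the model records per insn.  */
struct mem_ref
{
  int base_regno;  /* invented: base register number */
  int offset;      /* constant displacement */
};

/* Delay A in favor of B iff both use the same base and A's offset is
   larger, so that lower offsets issue first.  */
static bool
delay_in_favor_p (const mem_ref &a, const mem_ref &b)
{
  return a.base_regno == b.base_regno && a.offset > b.offset;
}

int
main ()
{
  mem_ref r1_0 = {1, 0}, r1_8 = {1, 8}, r2_0 = {2, 0};
  printf ("[r1+8] waits for [r1+0]? %d\n", delay_in_favor_p (r1_8, r1_0)); /* 1 */
  printf ("[r1+0] waits for [r1+8]? %d\n", delay_in_favor_p (r1_0, r1_8)); /* 0 */
  printf ("[r1+8] waits for [r2+0]? %d\n", delay_in_favor_p (r1_8, r2_0)); /* 0: different base */
  return 0;
}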
5597
5598/* Initialize autoprefetcher model data for INSN. */
5599static void
5600autopref_multipass_init (const rtx_insn *insn, int write)
5601{
5602 autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];
5603
5604 gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
5605 data->base = NULL_RTX;
5606 data->offset = 0;
5607 /* Set insn entry initialized, but not relevant for auto-prefetcher. */
5608 data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5609
5610 rtx pat = PATTERN (insn);
5611
5612 /* We have a multi-set insn like a load-multiple or store-multiple.
5613 We care about these as long as all the memory ops inside the PARALLEL
5614 have the same base register. We care about the minimum and maximum
5615 offsets from that base but don't check for the order of those offsets
5616 within the PARALLEL insn itself. */
5617 if (GET_CODE (pat) == PARALLEL)
5618 {
5619 int n_elems = XVECLEN (pat, 0);
5620
5621 int i, offset;
5622 rtx base, prev_base = NULL_RTX;
5623 int min_offset = INT_MAX;
5624
5625 for (i = 0; i < n_elems; i++)
5626 {
5627 rtx set = XVECEXP (pat, 0, i);
5628 if (GET_CODE (set) != SET)
5629 return;
5630
5631 if (!analyze_set_insn_for_autopref (set, write, &base, &offset))
5632 return;
5633
5634 /* Ensure that all memory operations in the PARALLEL use the same
5635 base register. */
5636 if (i > 0 && REGNO (base) != REGNO (prev_base))
5637 return;
5638 prev_base = base;
5639 min_offset = MIN (min_offset, offset);
5640 }
5641
5642 /* If we reached here then we have a valid PARALLEL of multiple memory ops
5643 with prev_base as the base and min_offset containing the offset. */
5644 gcc_assert (prev_base);
5645 data->base = prev_base;
5646 data->offset = min_offset;
5647 data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5648 return;
5649 }
5650
5651 /* Otherwise this is a single set memory operation. */
5652 rtx set = single_set (insn);
5653 if (set == NULL_RTX)
5654 return;
5655
5656 if (!analyze_set_insn_for_autopref (set, write, &data->base,
5657 &data->offset))
5658 return;
5659
5660 /* This insn is relevant for the auto-prefetcher.
5661 The base and offset fields will have been filled in the
5662 analyze_set_insn_for_autopref call above. */
5663 data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5664}
5665
5666/* Helper function for rank_for_schedule sorting. */
5667static int
5668autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
5669{
5670 int r = 0;
5671 for (int write = 0; write < 2 && !r; ++write)
5672 {
5673 autopref_multipass_data_t data1
5674 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5675 autopref_multipass_data_t data2
5676 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5677
5678 if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5679 autopref_multipass_init (insn1, write);
5680
5681 if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5682 autopref_multipass_init (insn2, write);
5683
5684 int irrel1 = data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5685 int irrel2 = data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5686
5687 if (!irrel1 && !irrel2)
5688 /* Sort memory references from the lowest offset to the highest. */
5689 r = data1->offset - data2->offset;
5690 else if (write)
5691 /* Schedule "irrelevant" insns before memory stores to resolve
5692 as many producer dependencies of stores as possible. */
5693 r = irrel2 - irrel1;
5694 else
5695 /* Schedule "irrelevant" insns after memory reads to avoid breaking
5696 memory read sequences. */
5697 r = irrel1 - irrel2;
5698 }
5699
5700 return r;
5701}
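Under the usual qsort convention, where a negative result ranks insn1 earlier, the comparator above orders a few invented cases as follows:

write == 0 (reads):   load [r1+0] vs load [r1+8]       -> r = 0 - 8 = -8: lower offset first
write == 0 (reads):   non-memory insn vs load [r1+0]   -> r = irrel1 - irrel2 = 1: the load goes first
write == 1 (writes):  non-memory insn vs store [r1+0]  -> r = irrel2 - irrel1 = -1: the non-memory insn goes first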
5702
5703/* True if header of debug dump was printed. */
5704static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;
5705
5706/* Helper for autopref_multipass_dfa_lookahead_guard.
5707 Return "1" if INSN1 should be delayed in favor of INSN2. */
5708static int
5709autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
5710 const rtx_insn *insn2, int write)
5711{
5712 autopref_multipass_data_t data1
5713 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5714 autopref_multipass_data_t data2
5715 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5716
5717 if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5718 autopref_multipass_init (insn2, write);
5719 if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5720 return 0;
5721
5722 if (rtx_equal_p (data1->base, data2->base)
5723 && data1->offset > data2->offset)
5724 {
5725 if (sched_verbose >= 2)
5726 {
5727 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5728 {
5729 fprintf (sched_dump,
5730 ";;\t\tnot trying in max_issue due to autoprefetch "
5731 "model: ");
5732 autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5733 }
5734
5735 fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
5736 }
5737
5738 return 1;
5739 }
5740
5741 return 0;
5742}
5743
5744/* General note:
5745
5746 We could also have hooked the autoprefetcher model into the
5747 first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5748 to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5749 (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5750 unblocked). We don't bother with this yet because the target of interest
5751 (ARM Cortex-A15) can issue only one memory operation per cycle. */
5752
5753/* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5754 Return "1" if INSN1 should not be considered in max_issue due to
5755 auto-prefetcher considerations. */
5756int
5757autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
5758{
5759 int r = 0;
5760
5761 /* Exit early if the param forbids this or if we're not entering here through
5762 normal haifa scheduling. This can happen if selective scheduling is
5763 explicitly enabled. */
5764 if (!insn_queue || param_sched_autopref_queue_depth <= 0)
5765 return 0;
5766
5767 if (sched_verbose >= 2 && ready_index == 0)
5768 autopref_multipass_dfa_lookahead_guard_started_dump_p = false;
5769
5770 for (int write = 0; write < 2; ++write)
5771 {
5772 autopref_multipass_data_t data1
5773 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5774
5775 if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5776 autopref_multipass_init (insn1, write);
5777 if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5778 continue;
5779
5780 if (ready_index == 0
5781 && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
5782 /* We allow only a single delay on privileged instructions.
5783 Doing otherwise would cause an infinite loop. */
5784 {
5785 if (sched_verbose >= 2)
5786 {
5787 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5788 {
5789 fprintf (sched_dump,
5790 ";;\t\tnot trying in max_issue due to autoprefetch "
5791 "model: ");
5792 autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5793 }
5794
5795 fprintf (sched_dump, " *%d*", INSN_UID (insn1));
5796 }
5797 continue;
5798 }
5799
5800 for (int i2 = 0; i2 < ready.n_ready; ++i2)
5801 {
5802 rtx_insn *insn2 = get_ready_element (i2);
5803 if (insn1 == insn2)
5804 continue;
5805 r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
5806 if (r)
5807 {
5808 if (ready_index == 0)
5809 {
5810 r = -1;
5811 data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5812 }
5813 goto finish;
5814 }
5815 }
5816
5817 if (param_sched_autopref_queue_depth == 1)
5818 continue;
5819
5820 /* Everything from the current queue slot should have been moved to
5821 the ready list. */
5822 gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
5823
5824 int n_stalls = param_sched_autopref_queue_depth - 1;
5825 if (n_stalls > max_insn_queue_index)
5826 n_stalls = max_insn_queue_index;
5827
5828 for (int stalls = 1; stalls <= n_stalls; ++stalls)
5829 {
5830 for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
5831 link != NULL_RTX;
5832 link = link->next ())
5833 {
5834 rtx_insn *insn2 = link->insn ();
5835 r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
5836 write);
5837 if (r)
5838 {
5839 /* Queue INSN1 until INSN2 can issue. */
5840 r = -stalls;