File: build/gcc/sel-sched.cc
/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006-2023 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "regs.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "target.h"
#include "sched-int.h"
#include "rtlhooks-def.h"
#include "ira.h"
#include "ira-int.h"
#include "rtl-iter.h"

#ifdef INSN_SCHEDULING
#include "regset.h"
#include "cfgloop.h"
#include "sel-sched-ir.h"
#include "sel-sched-dump.h"
#include "sel-sched.h"
#include "dbgcnt.h"
#include "function-abi.h"

/* Implementation of selective scheduling approach.
   The below implementation follows the original approach with the following
   changes:

   o the scheduler works after register allocation (but can be also tuned
   to work before RA);
   o some instructions are not copied or register renamed;
   o conditional jumps are not moved with code duplication;
   o several jumps in one parallel group are not supported;
   o when pipelining outer loops, code motion through inner loops
   is not supported;
   o control and data speculation are supported;
   o some improvements for better compile time/performance were made.

   Terminology
   ===========

   A vinsn, or virtual insn, is an insn with additional data characterizing
   insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
   Vinsns also act as smart pointers to save memory by reusing them in
   different expressions.  A vinsn is described by vinsn_t type.

   An expression is a vinsn with additional data characterizing its properties
   at some point in the control flow graph.  The data may be its usefulness,
   priority, speculative status, whether it was renamed/substituted, etc.
   An expression is described by expr_t type.

   Availability set (av_set) is a set of expressions at a given control flow
   point.  It is represented as av_set_t.  The expressions in av sets are kept
   sorted in the terms of the expr_greater_p function.  This allows truncating
   the set while keeping the best expressions.

   A fence is a point through which code motion is prohibited.  On each step,
   we gather a parallel group of insns at a fence.  It is possible to have
   multiple fences.  A fence is represented via fence_t.

   A boundary is the border between the fence group and the rest of the code.
   Currently, we never have more than one boundary per fence, as we finalize
   the fence group when a jump is scheduled.  A boundary is represented
   via bnd_t.

   High-level overview
   ===================

   The scheduler finds regions to schedule, schedules each one, and finalizes.
   The regions are formed starting from innermost loops, so that when the inner
   loop is pipelined, its prologue can be scheduled together with the yet
   unprocessed outer loop.  The remaining acyclic regions are found using
   extend_rgns: the blocks that are not yet allocated to any region are
   traversed in top-down order, and a block is added to the region to which
   all its predecessors belong; otherwise, the block starts its own region.

   The main scheduling loop (sel_sched_region_2) consists of just
   scheduling on each fence and updating fences.  For each fence,
   we fill a parallel group of insns (fill_insns) until no more insns can be
   added.  First, we compute available exprs (av-set) at the boundary of the
   current group.  Second, we choose the best expression from it.  If a stall
   is required to schedule any of the expressions, we advance the current cycle
   appropriately.  So, the final group does not exactly correspond to a VLIW
   word.  Third, we move the chosen expression to the boundary (move_op)
   and update the intermediate av sets and liveness sets.  We quit fill_insns
   when either no insns are left for scheduling or we have scheduled enough
   insns to justify advancing the scheduling point.
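
   Schematically, one iteration of this loop looks as follows (an
   illustrative sketch only; the real code in sel_sched_region_2 and
   fill_insns handles many more details):

     for each fence F in fences:
       av   <- compute_av_set (boundary of F)
       expr <- best expression of av per sel_rank_for_schedule
       if scheduling expr requires a stall:
         advance_one_cycle (F)              // possibly several times
       move_op (expr to the boundary of F)  // also updates av/liveness sets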

   Computing available expressions
   ===============================

   The computation (compute_av_set) is a bottom-up traversal.  At each insn,
   we're moving the union of its successors' sets through it via
   moveup_expr_set.  The dependent expressions are removed.  Local
   transformations (substitution, speculation) are applied to move more
   exprs.  Then the expr corresponding to the current insn is added.
   The result is saved on each basic block header.
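
   A worked example (for illustration only): moving "z = y + 1" up through
   the copy "y = x" substitutes the expression into "z = x + 1"
   (MOVEUP_EXPR_CHANGED); moving it through "z = w" is possible provided a
   new destination register is chosen for it (MOVEUP_EXPR_AS_RHS); moving it
   through "y = call ()" fails, as the true dependence cannot be undone
   (MOVEUP_EXPR_NULL).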

   When traversing the CFG, we're moving down for no more than max_ws insns.
   Also, we do not move down to ineligible successors (is_ineligible_successor),
   which include moving along a back-edge, moving to already scheduled code,
   and moving to another fence.  The first two restrictions are lifted during
   pipelining, which allows us to move insns along a back-edge.  We always have
   an acyclic region for scheduling because we forbid motion through fences.

   Choosing the best expression
   ============================

   We sort the final availability set via sel_rank_for_schedule, then we remove
   expressions which are not yet ready (tick_check_p) or whose destination
   registers cannot be used.  For some of them, we choose another register via
   find_best_reg.  To do this, we run find_used_regs to calculate the set of
   registers which cannot be used.  The find_used_regs function performs
   a traversal of code motion paths for an expr.  We consider for renaming
   only registers which are from the same regclass as the original one and
   whose use does not interfere with any live ranges.  Finally, we convert
   the resulting set to the ready list format and use the max_issue and
   reorder* hooks similarly to the Haifa scheduler.

   Scheduling the best expression
   ==============================

   We run the move_op routine to perform the same type of code motion paths
   traversal as in find_used_regs.  (These are working via the same driver,
   code_motion_path_driver.)  When moving down the CFG, we look for the
   original instruction that gave birth to the chosen expression.  We undo
   the transformations performed on an expression via the history saved in it.
   When found, we remove the instruction or leave a reg-reg copy/speculation
   check if needed.  On the way up, we insert bookkeeping copies at each join
   point.  If a copy is not needed, it will be removed later during this
   traversal.  We update the saved av sets and liveness sets on the way up, too.
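
   A small bookkeeping example (for illustration only): suppose "x = a" is
   moved from block BB3 up to the fence in BB1, while BB3 has another
   predecessor BB2 through which the expression would otherwise be lost:

        BB1               BB2
      x = a (moved)     x = a (bookkeeping copy)
           \             /
            \           /
                 BB3
           ("x = a" removed)

   The copy inserted at the end of BB2 keeps all paths into BB3 computing x.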

   Finalizing the schedule
   =======================

   When pipelining, we reschedule the blocks from which insns were pipelined
   to get a tighter schedule.  On Itanium, we also perform bundling via
   the same routine from ia64.cc.

   Dependence analysis changes
   ===========================

   We augmented sched-deps.cc with hooks that get called when a particular
   dependence is found in a particular part of an insn.  Using these hooks, we
   can do several actions such as: determine whether an insn can be moved through
   another (has_dependence_p, moveup_expr); find out whether an insn can be
   scheduled on the current cycle (tick_check_p); find out registers that
   are set/used/clobbered by an insn and find out all the strange stuff that
   restricts its movement, like SCHED_GROUP_P or CANT_MOVE (done in
   init_global_and_expr_for_insn).

   Initialization changes
   ======================

   There are parts of haifa-sched.cc, sched-deps.cc, and sched-rgn.cc that are
   reused in all of the schedulers.  We have split up the initialization of data
   of such parts into different functions prefixed with scheduler type and
   postfixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish},
   sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc.
   The same splitting is done with the current_sched_info structure:
   dependence-related parts are in sched_deps_info, the common part is in
   common_sched_info, and the haifa/sel/etc. part is in current_sched_info.

   Target contexts
   ===============

   As we now have multiple-point scheduling, the old scheme would not work with
   backends which save some of the scheduler state to use it in the target
   hooks.  For this purpose, we introduce the concept of target contexts, which
   encapsulate such information.  The backend should implement simple routines
   for allocating/freeing/setting such a context.  The scheduler calls these
   as target hooks and handles the target context as an opaque pointer (similar
   to the DFA state type, state_t).
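
   Schematically, and for illustration only (hook names as declared in
   target.def), switching to a given fence's target context looks like:

     void *tc = targetm.sched.alloc_sched_context ();
     targetm.sched.init_sched_context (tc, false); // copy the current state
     ...
     targetm.sched.set_sched_context (tc);  // resume scheduling on this fence
     ...
     targetm.sched.free_sched_context (tc);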

   Various speedups
   ================

   As the correct data dependence graph is not supported during scheduling (which
   is to be changed in mid-term), we cache as much of the dependence analysis
   results as possible to avoid reanalyzing.  This includes: bitmap caches on
   each insn in stream of the region saying yes/no for a query with a pair of
   UIDs; hashtables with the previously done transformations on each insn in
   stream; a vector keeping a history of transformations on each expr.

   Also, we try to minimize the dependence context used on each fence to check
   whether the given expression is ready for scheduling by removing from it
   insns that have definitely completed execution.  The results of
   tick_check_p checks are also cached in a vector on each fence.

   We keep a valid liveness set on each insn in a region to avoid the high
   cost of recomputation on large basic blocks.

   Finally, we try to minimize the number of needed updates to the availability
   sets.  The updates happen in two cases: when fill_insns terminates,
   we advance all fences and increase the stage number to show that the region
   has changed and the sets are to be recomputed; and when the next iteration
   of a loop in fill_insns happens (but this one reuses the saved av sets
   on bb headers).  Thus, we try to break the fill_insns loop only when
   a "significant" number of insns from the current scheduling window has been
   scheduled.  This should be made a target param.


   TODO: correctly support the data dependence graph at all stages and get rid
   of all caches.  This should speed up the scheduler.
   TODO: implement moving cond jumps with bookkeeping copies on both targets.
   TODO: tune the scheduler before RA so it does not create too many pseudos.


   References:
   S.-M. Moon and K. Ebcioglu.  Parallelizing nonnumerical code with
   selective scheduling and software pipelining.
   ACM TOPLAS, Vol 19, No. 6, pages 853--898, Nov. 1997.

   Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
   and Dmitry Zhurikhin.  An interblock VLIW-targeted instruction scheduler
   for GCC.  In Proceedings of GCC Developers' Summit 2006.

   Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik.  GCC Instruction
   Scheduler and Software Pipeliner on the Itanium Platform.  EPIC-7 Workshop.
   http://rogue.colorado.edu/EPIC7/.

*/

/* True when pipelining is enabled.  */
bool pipelining_p;

/* True if bookkeeping is enabled.  */
bool bookkeeping_p;

/* Maximum number of insns that are eligible for renaming.  */
int max_insns_to_rename;


/* Definitions of local types and macros.  */

/* Represents possible outcomes of moving an expression through an insn.  */
enum MOVEUP_EXPR_CODE
  {
    /* The expression is not changed.  */
    MOVEUP_EXPR_SAME,

    /* Not changed, but requires a new destination register.  */
    MOVEUP_EXPR_AS_RHS,

    /* Cannot be moved.  */
    MOVEUP_EXPR_NULL,

    /* Changed (substituted or speculated).  */
    MOVEUP_EXPR_CHANGED
  };

/* The container to be passed into rtx search & replace functions.  */
struct rtx_search_arg
{
  /* What we are searching for.  */
  rtx x;

  /* The occurrence counter.  */
  int n;
};

typedef struct rtx_search_arg *rtx_search_arg_p;

/* This struct contains precomputed hard reg sets that are needed when
   computing registers available for renaming.  */
struct hard_regs_data
{
  /* For every mode, this stores registers available for use with
     that mode.  */
  HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES];

  /* True when regs_for_mode[mode] is initialized.  */
  bool regs_for_mode_ok[NUM_MACHINE_MODES];

  /* For every register, it has regs that are ok to rename into it.
     The register in question is always set.  If not, this means
     that the whole set is not computed yet.  */
  HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];

  /* All registers that are used or call used.  */
  HARD_REG_SET regs_ever_used;

#ifdef STACK_REGS
  /* Stack registers.  */
  HARD_REG_SET stack_regs;
#endif
};

/* Holds the results of computation of available for renaming and
   unavailable hard registers.  */
struct reg_rename
{
  /* These are unavailable due to calls crossing, globalness, etc.  */
  HARD_REG_SET unavailable_hard_regs;

  /* These are *available* for renaming.  */
  HARD_REG_SET available_for_renaming;

  /* The set of ABIs used by calls that the code motion path crosses.  */
  unsigned int crossed_call_abis : NUM_ABI_IDS;
};

/* A global structure that contains the needed information about hard
   regs.  */
static struct hard_regs_data sel_hrd;


/* This structure holds local data used in code_motion_path_driver hooks on
   the same or adjacent levels of recursion.  Here we keep those parameters
   that are not used in code_motion_path_driver routine itself, but only in
   its hooks.  Moreover, all parameters that can be modified in hooks are
   in this structure, so all other parameters passed explicitly to hooks are
   read-only.  */
struct cmpd_local_params
{
  /* Local params used in move_op_* functions.  */

  /* Edges for bookkeeping generation.  */
  edge e1, e2;

  /* C_EXPR merged from all successors and locally allocated temporary C_EXPR.  */
  expr_t c_expr_merged, c_expr_local;

  /* Local params used in fur_* functions.  */
  /* Copy of the ORIGINAL_INSN list, stores the original insns already
     found before entering the current level of code_motion_path_driver.  */
  def_list_t old_original_insns;

  /* Local params used in move_op_* functions.  */
  /* True when we have removed the last insn in the block which was
     also a boundary.  Do not update anything or create bookkeeping copies.  */
  BOOL_BITFIELD removed_last_insn : 1;
};

/* Stores the static parameters for move_op_* calls.  */
struct moveop_static_params
{
  /* Destination register.  */
  rtx dest;

  /* Current C_EXPR.  */
  expr_t c_expr;

  /* The UID of expr_vliw which is to be moved up.  If we find other exprs,
     they are to be removed.  */
  int uid;

  /* This is initialized to the insn on which the driver stopped its traversal.  */
  insn_t failed_insn;

  /* True if we scheduled an insn with a different register.  */
  bool was_renamed;
};

/* Stores the static parameters for fur_* calls.  */
struct fur_static_params
{
  /* Set of registers unavailable on the code motion path.  */
  regset used_regs;

  /* Pointer to the list of original insns definitions.  */
  def_list_t *original_insns;

  /* The set of ABIs used by calls that the code motion path crosses.  */
  unsigned int crossed_call_abis : NUM_ABI_IDS;
};

typedef struct fur_static_params *fur_static_params_p;
typedef struct cmpd_local_params *cmpd_local_params_p;
typedef struct moveop_static_params *moveop_static_params_p;

/* Set of hooks and parameters that determine behavior specific to
   move_op or find_used_regs functions.  */
struct code_motion_path_driver_info_def
{
  /* Called on enter to the basic block.  */
  int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool);

  /* Called when original expr is found.  */
  void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *);

  /* Called while descending current basic block if current insn is not
     the original EXPR we're searching for.  */
  bool (*orig_expr_not_found) (insn_t, av_set_t, void *);

  /* Function to merge C_EXPRes from different successors.  */
  void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *);

  /* Function to finalize merge from different successors and possibly
     deallocate temporary data structures used for merging.  */
  void (*after_merge_succs) (cmpd_local_params_p, void *);

  /* Called on the backward stage of recursion to do moveup_expr.
     Used only with move_op_*.  */
  void (*ascend) (insn_t, void *);

  /* Called on the ascending pass, before returning from the current basic
     block or from the whole traversal.  */
  void (*at_first_insn) (insn_t, cmpd_local_params_p, void *);

  /* When processing successors in move_op we need only descend into
     SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL.  */
  int succ_flags;

  /* The routine name to print in dumps ("move_op" or "find_used_regs").  */
  const char *routine_name;
};

/* Global pointer to current hooks, either points to MOVE_OP_HOOKS or
   FUR_HOOKS.  */
struct code_motion_path_driver_info_def *code_motion_path_driver_info;

/* Set of hooks for performing move_op and find_used_regs routines with
   code_motion_path_driver.  */
extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks;

/* True if/when we want to emulate the Haifa scheduler in the common code.
   This is used in sched_rgn_local_init and in various places in
   sched-deps.cc.  */
int sched_emulate_haifa_p;

/* GLOBAL_LEVEL is used to discard information stored in basic block headers
   av_sets.  Av_set of bb header is valid if its (bb header's) level is equal
   to GLOBAL_LEVEL, and invalid if lesser.  This is primarily used to advance
   the scheduling window.  */
int global_level;

/* Current fences.  */
flist_t fences;

/* True when separable insns should be scheduled as RHSes.  */
static bool enable_schedule_as_rhs_p;

/* Used in verify_target_availability to assert that target reg is reported
   unavailable by both TARGET_UNAVAILABLE and find_used_regs only if
   we haven't scheduled anything on the previous fence.
   If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
   have a more conservative value than the one returned by
   find_used_regs, thus we shouldn't assert that these values are equal.  */
static bool scheduled_something_on_previous_fence;

/* All newly emitted insns will have their uids greater than this value.  */
static int first_emitted_uid;

/* Set of basic blocks that are forced to start new ebbs.  This is a subset
   of all the ebb heads.  */
bitmap forced_ebb_heads;

/* Blocks that need to be rescheduled after pipelining.  */
bitmap blocks_to_reschedule = NULL;

/* True when the first lv set should be ignored when updating liveness.  */
static bool ignore_first = false;

/* Number of insns max_issue has initialized data structures for.  */
static int max_issue_size = 0;

/* Whether we can issue more instructions.  */
static int can_issue_more;

/* Maximum software lookahead window size, reduced when rescheduling after
   pipelining.  */
static int max_ws;

/* Number of insns scheduled in current region.  */
static int num_insns_scheduled;

/* A vector of expressions is used to be able to sort them.  */
static vec<expr_t> vec_av_set;

/* A vector of vinsns is used to hold temporary lists of vinsns.  */
typedef vec<vinsn_t> vinsn_vec_t;

/* This vector has the exprs which may still be present in av_sets, but actually
   can't be moved up due to bookkeeping created during code motion to another
   fence.  See the comment near the call to update_and_record_unavailable_insns
   for the detailed explanations.  */
static vinsn_vec_t vec_bookkeeping_blocked_vinsns = vinsn_vec_t ();

/* This vector has vinsns which are scheduled with renaming on the first fence
   and then seen on the second.  For expressions with such vinsns, target
   availability information may be wrong.  */
static vinsn_vec_t vec_target_unavailable_vinsns = vinsn_vec_t ();

/* Vector to store temporary nops inserted in move_op to prevent removal
   of empty bbs.  */
static vec<insn_t> vec_temp_moveop_nops;

/* These bitmaps record original instructions scheduled on the current
   iteration and bookkeeping copies created by them.  */
static bitmap current_originators = NULL;
static bitmap current_copies = NULL;

/* This bitmap marks the blocks visited by code_motion_path_driver so we don't
   visit them afterwards.  */
static bitmap code_motion_visited_blocks = NULL;

/* Variables to accumulate different statistics.  */

/* The number of bookkeeping copies created.  */
static int stat_bookkeeping_copies;

/* The number of insns that required bookkeeping for their scheduling.  */
static int stat_insns_needed_bookkeeping;

/* The number of insns that got renamed.  */
static int stat_renamed_scheduled;

/* The number of substitutions made during scheduling.  */
static int stat_substitutions_total;


/* Forward declarations of static functions.  */
static bool rtx_ok_for_substitution_p (rtx, rtx);
static int sel_rank_for_schedule (const void *, const void *);
static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax);

static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
                            def_list_t *);
static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
                                    cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (int);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);

static void debug_state (state_t);


/* Functions that work with fences.  */

/* Advance one cycle on FENCE.  */
static void
advance_one_cycle (fence_t fence)
{
  unsigned i;
  int cycle;
  rtx_insn *insn;

  advance_state (FENCE_STATE (fence));
  cycle = ++FENCE_CYCLE (fence);
  FENCE_ISSUED_INSNS (fence) = 0;
  FENCE_STARTS_CYCLE_P (fence) = 1;
  can_issue_more = issue_rate;
  FENCE_ISSUE_MORE (fence) = can_issue_more;

  for (i = 0; vec_safe_iterate (FENCE_EXECUTING_INSNS (fence), i, &insn); )
    {
      if (INSN_READY_CYCLE (insn) < cycle)
        {
          remove_from_deps (FENCE_DC (fence), insn);
          FENCE_EXECUTING_INSNS (fence)->unordered_remove (i);
          continue;
        }
      i++;
    }
  if (sched_verbose >= 2)
    {
      sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
      debug_state (FENCE_STATE (fence));
    }
}

/* Returns true when SUCC is in a fallthru bb of INSN, possibly
   skipping empty basic blocks.  */
static bool
in_fallthru_bb_p (rtx_insn *insn, rtx succ)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  edge e;

  if (bb == BLOCK_FOR_INSN (succ))
    return true;

  e = find_fallthru_edge_from (bb);
  if (e)
    bb = e->dest;
  else
    return false;

  while (sel_bb_empty_p (bb))
    bb = bb->next_bb;

  return bb == BLOCK_FOR_INSN (succ);
}

/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
   When a successor will continue an ebb, transfer all parameters of a fence
   to the new fence.  ORIG_MAX_SEQNO is the maximal seqno before this round
   of scheduling helping to distinguish between the old and the new code.  */
static void
extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
                         int orig_max_seqno)
{
  bool was_here_p = false;
  insn_t insn = NULL;
  insn_t succ;
  succ_iterator si;
  ilist_iterator ii;
  fence_t fence = FLIST_FENCE (old_fences);
  basic_block bb;

  /* Get the only element of FENCE_BNDS (fence).  */
  FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence))
    {
      gcc_assert (!was_here_p);
      was_here_p = true;
    }
  gcc_assert (was_here_p && insn != NULL_RTX);

  /* When in the "middle" of the block, just move this fence
     to the new list.  */
  bb = BLOCK_FOR_INSN (insn);
  if (! sel_bb_end_p (insn)
      || (single_succ_p (bb)
          && single_pred_p (single_succ (bb))))
    {
      insn_t succ;

      succ = (sel_bb_end_p (insn)
              ? sel_bb_head (single_succ (bb))
              : NEXT_INSN (insn));

      if (INSN_SEQNO (succ) > 0
          && INSN_SEQNO (succ) <= orig_max_seqno
          && INSN_SCHED_TIMES (succ) <= 0)
        {
          FENCE_INSN (fence) = succ;
          move_fence_to_fences (old_fences, new_fences);

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state continue)\n",
                       INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
        }
      return;
    }

  /* Otherwise copy fence's structures to (possibly) multiple successors.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      int seqno = INSN_SEQNO (succ);

      if (seqno > 0 && seqno <= orig_max_seqno
          && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
        {
          bool b = (in_same_ebb_p (insn, succ)
                    || in_fallthru_bb_p (insn, succ));

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state %s)\n",
                       INSN_UID (insn), INSN_UID (succ),
                       BLOCK_NUM (succ), b ? "continue" : "reset");

          if (b)
            add_dirty_fence_to_fences (new_fences, succ, fence);
          else
            {
              /* Mark block of the SUCC as head of the new ebb.  */
              bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ));
              add_clean_fence_to_fences (new_fences, succ, fence);
            }
        }
    }
}


/* Functions to support substitution.  */

/* Returns whether INSN with dependence status DS is eligible for
   substitution, i.e. it's a copy operation x := y, and RHS that is
   moved up through this insn should be substituted.  */
static bool
can_substitute_through_p (insn_t insn, ds_t ds)
{
  /* We can substitute only true dependencies.  */
  if ((ds & DEP_OUTPUT)
      || (ds & DEP_ANTI)
      || ! INSN_RHS (insn)
      || ! INSN_LHS (insn))
    return false;

  /* Now we just need to make sure the INSN_RHS consists of only one
     simple REG rtx.  */
  if (REG_P (INSN_LHS (insn))
      && REG_P (INSN_RHS (insn)))
    return true;
  return false;
}
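
/* For illustration only: if INSN is the copy "ax = bx" and the expression
   "cx = ax + 8" is moved up through it with only a true dependence on ax,
   can_substitute_through_p holds, and substitute_reg_in_expr below rewrites
   the expression into "cx = bx + 8".  */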

/* Substitute all occurrences of INSN's destination in EXPR's vinsn with
   INSN's source (if INSN is eligible for substitution).  Returns TRUE if
   substitution was actually performed, FALSE otherwise.  Substitution might
   not be performed because either EXPR's vinsn doesn't contain INSN's
   destination or the resulting insn is invalid for the target machine.
   When UNDO is true, perform unsubstitution instead (the difference is in
   the part of rtx on which validate_replace_rtx is called).  */
static bool
substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
{
  rtx *where;
  bool new_insn_valid;
  vinsn_t *vi = &EXPR_VINSN (expr);
  bool has_rhs = VINSN_RHS (*vi) != NULL;
  rtx old, new_rtx;

  /* Do not try to replace in SET_DEST.  Although we'll choose new
     register for the RHS, we don't want to change RHS' original reg.
     If the insn is not SET, we may still be able to substitute something
     in it, and if we're here (don't have deps), it doesn't write INSN's
     dest.  */
  where = (has_rhs
           ? &VINSN_RHS (*vi)
           : &PATTERN (VINSN_INSN_RTX (*vi)));
  old = undo ? INSN_RHS (insn) : INSN_LHS (insn);

  /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
  if (rtx_ok_for_substitution_p (old, *where))
    {
      rtx_insn *new_insn;
      rtx *where_replace;

      /* We should copy these rtxes before substitution.  */
      new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn));
      new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi));

      /* Where we'll replace.
         WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be
         used instead of SET_SRC.  */
      where_replace = (has_rhs
                       ? &SET_SRC (PATTERN (new_insn))
                       : &PATTERN (new_insn));

      new_insn_valid
        = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
                                                new_insn);

      /* ??? Actually, constrain_operands result depends upon choice of
         destination register.  E.g. if we allow single register to be an rhs,
         and if we try to move dx=ax(as rhs) through ax=dx, we'll result
         in invalid insn dx=dx, so we'll lose this rhs here.
         Just can't come up with significant testcase for this, so just
         leaving it for now.  */
      if (new_insn_valid)
        {
          change_vinsn_in_expr (expr,
                                create_vinsn_from_insn_rtx (new_insn, false));

          /* Do not allow clobbering the address register of speculative
             insns.  */
          if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
              && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                                         expr_dest_reg (expr)))
            EXPR_TARGET_AVAILABLE (expr) = false;

          return true;
        }
      else
        return false;
    }
  else
    return false;
}

/* Return the number of places WHAT appears within WHERE.
   Bail out when we find a reference occupying several hard registers.  */
static int
count_occurrences_equiv (const_rtx what, const_rtx where)
{
  int count = 0;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, where, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) == REGNO (what))
        {
          /* Bail out if mode is different or more than one register is
             used.  */
          if (GET_MODE (x) != GET_MODE (what) || REG_NREGS (x) > 1)
            return 0;
          count += 1;
        }
      else if (GET_CODE (x) == SUBREG
               && (!REG_P (SUBREG_REG (x))
                   || REGNO (SUBREG_REG (x)) == REGNO (what)))
        /* ??? Do not support substituting regs inside subregs.  In that case,
           simplify_subreg will be called by validate_replace_rtx, and
           unsubstitution will fail later.  */
        return 0;
    }
  return count;
}

/* Returns TRUE if WHAT is found in WHERE rtx tree.  */
static bool
rtx_ok_for_substitution_p (rtx what, rtx where)
{
  return (count_occurrences_equiv (what, where) > 0);
}


/* Functions to support register renaming.  */

/* Substitute VI's set source with RHS_RTX.  Returns the newly created
   insn rtx that has RHS_RTX as its source.  */
static rtx_insn *
create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
{
  rtx lhs_rtx;
  rtx pattern;
  rtx_insn *insn_rtx;

  lhs_rtx = copy_rtx (VINSN_LHS (vi));

  pattern = gen_rtx_SET (lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Returns whether INSN's src can be replaced with register NEW_SRC_REG.
   E.g. the following insn is valid for i386:

    (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
      (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp)
                        (reg:SI 0 ax [orig:770 c1 ] [770]))
                    (const_int 288 [0x120])) [0 str S1 A8])
            (const_int 0 [0x0])) 43 {*movqi_1} (nil)
        (nil))

   But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid
   because of operand constraints:

    (define_insn "*movqi_1"
      [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
            (match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,qm,qn")
      )]

   So do constrain_operands here, before choosing NEW_SRC_REG as best
   reg for rhs.  */

static bool
replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  machine_mode mode;
  rtx dst_loc;
  bool res;

  gcc_assert (VINSN_SEPARABLE_P (vi));

  get_dest_and_mode (insn, &dst_loc, &mode);
  gcc_assert (mode == GET_MODE (new_src_reg));

  if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc))
    return true;

  /* See whether SET_SRC can be replaced with this register.  */
  validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Returns whether INSN will still be valid after replacing its DEST with
   register NEW_REG.  */
static bool
replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  bool res;

  /* We should deal here only with separable insns.  */
  gcc_assert (VINSN_SEPARABLE_P (vi));
  gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg));

  /* See whether SET_DEST can be replaced with this register.  */
  validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}
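
/* Note on the two predicates above: calling validate_change with a nonzero
   in-group argument applies the replacement but defers validation;
   verify_changes (0) then checks whether the changed insn is still
   recognizable, and cancel_changes (0) restores the original pattern, so
   neither predicate has a lasting effect on INSN.  */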

/* Create a pattern with rhs of VI and lhs of LHS_RTX.  */
static rtx_insn *
create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx)
{
  rtx rhs_rtx;
  rtx pattern;
  rtx_insn *insn_rtx;

  rhs_rtx = copy_rtx (VINSN_RHS (vi));

  pattern = gen_rtx_SET (lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Substitute lhs in the given expression EXPR for the register NEW_REG.
   SET_DEST may be arbitrary rtx, not only register.  */
static void
replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
{
  rtx_insn *insn_rtx;
  vinsn_t vinsn;

  insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg);
  vinsn = create_vinsn_from_insn_rtx (insn_rtx, false);

  change_vinsn_in_expr (expr, vinsn);
  EXPR_WAS_RENAMED (expr) = 1;
  EXPR_TARGET_AVAILABLE (expr) = 1;
}

/* Returns whether VI writes either one of the USED_REGS registers or,
   if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers.  */
static bool
vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
                            HARD_REG_SET unavailable_hard_regs)
{
  unsigned regno;
  reg_set_iterator rsi;

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
        return true;
    }

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
        return true;
    }

  return false;
}

/* Returns register class of the output register in INSN.
   Returns NO_REGS for call insns because some targets have constraints on
   destination register of a call insn.

   Code adapted from regrename.cc::build_def_use.  */
static enum reg_class
get_reg_class (rtx_insn *insn)
{
  int i, n_ops;

  extract_constrain_insn (insn);
  preprocess_constraints (insn);
  n_ops = recog_data.n_operands;

  const operand_alternative *op_alt = which_op_alt ();
  if (asm_noperands (PATTERN (insn)) > 0)
    {
      for (i = 0; i < n_ops; i++)
        if (recog_data.operand_type[i] == OP_OUT)
          {
            rtx *loc = recog_data.operand_loc[i];
            rtx op = *loc;
            enum reg_class cl = alternative_class (op_alt, i);

            if (REG_P (op)
                && REGNO (op) == ORIGINAL_REGNO (op))
              continue;

            return cl;
          }
    }
  else if (!CALL_P (insn))
    {
      for (i = 0; i < n_ops + recog_data.n_dups; i++)
        {
          int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops];
          enum reg_class cl = alternative_class (op_alt, opn);

          if (recog_data.operand_type[opn] == OP_OUT
              || recog_data.operand_type[opn] == OP_INOUT)
            return cl;
        }
    }

  /* Insns like
     (insn (set (reg:CCZ 17 flags) (compare:CCZ ...)))
     may result in returning NO_REGS, because flags is written implicitly
     through a CMP insn, which has no OP_OUT | OP_INOUT operands.  */
  return NO_REGS;
}

/* Calculate HARD_REGNO_RENAME_OK data for REGNO.  */
static void
init_hard_regno_rename (int regno)
{
  int cur_reg;

  SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      /* We are not interested in renaming in other regs.  */
      if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg))
        continue;

      if (HARD_REGNO_RENAME_OK (regno, cur_reg))
        SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg);
    }
}

/* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
   data first.  */
static inline bool
sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED)
{
  /* Check whether this is all calculated.  */
  if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from))
    return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);

  init_hard_regno_rename (from);

  return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);
}

/* Calculate set of registers that are capable of holding MODE.  */
static void
init_regs_for_mode (machine_mode mode)
{
  int cur_reg;

  CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      int nregs;
      int i;

      /* See whether it accepts all modes that occur in
         original insns.  */
      if (!targetm.hard_regno_mode_ok (cur_reg, mode))
        continue;

      nregs = hard_regno_nregs (cur_reg, mode);

      for (i = nregs - 1; i >= 0; --i)
        if (fixed_regs[cur_reg + i]
            || global_regs[cur_reg + i]
            /* Can't use regs which aren't saved by
               the prologue.  */
            || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i)
            /* Can't use regs with non-null REG_BASE_VALUE, because adjusting
               it affects aliasing globally and invalidates all AV sets.  */
            || get_reg_base_value (cur_reg + i)
#ifdef LEAF_REGISTERS
            /* We can't use a non-leaf register if we're in a
               leaf function.  */
            || (crtl->is_leaf
                && !LEAF_REGISTERS[cur_reg + i])
#endif
            )
          break;

      if (i >= 0)
        continue;

      /* If the CUR_REG passed all the checks above,
         then it's ok.  */
      SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
    }

  sel_hrd.regs_for_mode_ok[mode] = true;
}

/* Init all register sets gathered in HRD.  */
static void
init_hard_regs_data (void)
{
  int cur_reg = 0;
  int cur_mode = 0;

  CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used);
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    if (df_regs_ever_live_p (cur_reg)
        || crtl->abi->clobbers_full_reg_p (cur_reg))
      SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);

  /* Initialize registers that are valid based on mode when this is
     really needed.  */
  for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++)
    sel_hrd.regs_for_mode_ok[cur_mode] = false;

  /* Mark that all HARD_REGNO_RENAME_OK is not calculated.  */
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]);

#ifdef STACK_REGS
  CLEAR_HARD_REG_SET (sel_hrd.stack_regs);

  for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++)
    SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg);
#endif
}
1137 | |
1138 | /* Mark hardware regs in REG_RENAME_P that are not suitable |
1139 | for renaming rhs in INSN due to hardware restrictions (register class, |
1140 | modes compatibility etc). This doesn't affect original insn's dest reg, |
1141 | if it isn't in USED_REGS. DEF is a definition insn of rhs for which the |
1142 | destination register is sought. LHS (DEF->ORIG_INSN) may be REG or MEM. |
1143 | Registers that are in used_regs are always marked in |
1144 | unavailable_hard_regs as well. */ |
1145 | |
1146 | static void |
1147 | mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p, |
1148 | regset used_regs ATTRIBUTE_UNUSED__attribute__ ((__unused__))) |
1149 | { |
1150 | machine_mode mode; |
1151 | enum reg_class cl = NO_REGS; |
1152 | rtx orig_dest; |
1153 | unsigned cur_reg, regno; |
1154 | hard_reg_set_iterator hrsi; |
1155 | |
1156 | gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET)((void)(!(((enum rtx_code) (PATTERN (def->orig_insn))-> code) == SET) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sel-sched.cc" , 1156, __FUNCTION__), 0 : 0)); |
1157 | gcc_assert (reg_rename_p)((void)(!(reg_rename_p) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sel-sched.cc" , 1157, __FUNCTION__), 0 : 0)); |
1158 | |
1159 | orig_dest = SET_DEST (PATTERN (def->orig_insn))(((PATTERN (def->orig_insn))->u.fld[0]).rt_rtx); |
1160 | |
1161 | /* We have decided not to rename 'mem = something;' insns, as 'something' |
1162 | is usually a register. */ |
1163 | if (!REG_P (orig_dest)(((enum rtx_code) (orig_dest)->code) == REG)) |
1164 | return; |
1165 | |
1166 | regno = REGNO (orig_dest)(rhs_regno(orig_dest)); |
1167 | |
1168 | /* If before reload, don't try to work with pseudos. */ |
1169 | if (!reload_completed && !HARD_REGISTER_NUM_P (regno)((regno) < 76)) |
1170 | return; |
1171 | |
1172 | if (reload_completed) |
1173 | cl = get_reg_class (def->orig_insn); |
1174 | |
1175 | /* Stop if the original register is one of the fixed_regs, global_regs or |
1176 | frame pointer, or we could not discover its class. */ |
1177 | if (fixed_regs(this_target_hard_regs->x_fixed_regs)[regno] |
1178 | || global_regs[regno] |
1179 | || (!HARD_FRAME_POINTER_IS_FRAME_POINTER(6 == 19) && frame_pointer_needed((&x_rtl)->frame_pointer_needed) |
1180 | && regno == HARD_FRAME_POINTER_REGNUM6) |
1181 | || (HARD_FRAME_POINTER_IS_FRAME_POINTER(6 == 19) && frame_pointer_needed((&x_rtl)->frame_pointer_needed) |
1182 | && regno == FRAME_POINTER_REGNUM19) |
1183 | || (reload_completed && cl == NO_REGS)) |
1184 | { |
1185 | SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs); |
1186 | |
1187 | /* Give a chance for original register, if it isn't in used_regs. */ |
1188 | if (!def->crossed_call_abis) |
1189 | CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno); |
1190 | |
1191 | return; |
1192 | } |
1193 | |
1194 | /* If something allocated on stack in this function, mark frame pointer |
1195 | register unavailable, considering also modes. |
1196 | FIXME: it is enough to do this once per all original defs. */ |
1197 | if (frame_pointer_needed((&x_rtl)->frame_pointer_needed)) |
1198 | { |
1199 | add_to_hard_reg_set (®_rename_p->unavailable_hard_regs, |
1200 | Pmode(global_options.x_ix86_pmode == PMODE_DI ? (scalar_int_mode ( (scalar_int_mode::from_int) E_DImode)) : (scalar_int_mode ((scalar_int_mode ::from_int) E_SImode))), FRAME_POINTER_REGNUM19); |
1201 | |
1202 | if (!HARD_FRAME_POINTER_IS_FRAME_POINTER(6 == 19)) |
1203 | add_to_hard_reg_set (®_rename_p->unavailable_hard_regs, |
1204 | Pmode(global_options.x_ix86_pmode == PMODE_DI ? (scalar_int_mode ( (scalar_int_mode::from_int) E_DImode)) : (scalar_int_mode ((scalar_int_mode ::from_int) E_SImode))), HARD_FRAME_POINTER_REGNUM6); |
1205 | } |
1206 | |
1207 | #ifdef STACK_REGS |
1208 | /* For the stack registers the presence of FIRST_STACK_REG in USED_REGS |
1209 | is equivalent to as if all stack regs were in this set. |
1210 | I.e. no stack register can be renamed, and even if it's an original |
1211 | register here we make sure it won't be lifted over it's previous def |
1212 | (it's previous def will appear as if it's a FIRST_STACK_REG def. |
1213 | The HARD_REGNO_RENAME_OK covers other cases in condition below. */ |
1214 | if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)((unsigned long) ((rhs_regno(orig_dest))) - (unsigned long) ( 8) <= (unsigned long) (15) - (unsigned long) (8)) |
1215 | && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG)bitmap_bit_p (used_regs, 8)) |
1216 | reg_rename_p->unavailable_hard_regs |= sel_hrd.stack_regs; |
1217 | #endif |
1218 | |
1219 | mode = GET_MODE (orig_dest)((machine_mode) (orig_dest)->mode); |
1220 | |
1221 | /* If there's a call on this path, make regs from full_reg_clobbers |
1222 | unavailable. |
1223 | |
1224 | ??? It would be better to track the set of clobbered registers |
1225 | directly, but that would be quite expensive in a def_t. */ |
1226 | if (def->crossed_call_abis) |
1227 | reg_rename_p->unavailable_hard_regs |
1228 | |= call_clobbers_in_region (def->crossed_call_abis, |
1229 | reg_class_contents(this_target_hard_regs->x_reg_class_contents)[ALL_REGS], mode); |
1230 | |
1231 | /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and |
1232 | crossed_call_abis, but not register classes. */ |
1233 | if (!reload_completed) |
1234 | return; |
1235 | |
1236 | /* Leave regs as 'available' only from the current |
1237 | register class. */ |
1238 | reg_rename_p->available_for_renaming = reg_class_contents[cl]; |
1239 | |
1240 | /* Leave only registers available for this mode. */ |
1241 | if (!sel_hrd.regs_for_mode_ok[mode]) |
1242 | init_regs_for_mode (mode); |
1243 | reg_rename_p->available_for_renaming &= sel_hrd.regs_for_mode[mode]; |
1244 | |
1245 | /* Leave only those that are ok to rename. */ |
1246 | EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming, |
1247 | 0, cur_reg, hrsi) |
1248 | { |
1249 | int nregs; |
1250 | int i; |
1251 | |
1252 | nregs = hard_regno_nregs (cur_reg, mode); |
1253 | gcc_assert (nregs > 0); |
1254 | |
1255 | for (i = nregs - 1; i >= 0; --i) |
1256 | if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i)) |
1257 | break; |
1258 | |
1259 | if (i >= 0) |
1260 | CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming, |
1261 | cur_reg); |
1262 | } |
1263 | |
1264 | reg_rename_p->available_for_renaming &= ~reg_rename_p->unavailable_hard_regs; |
1265 | |
1266 | /* Regno is always ok from the renaming point of view, but it really |
1267 | could be in *unavailable_hard_regs already, so set it here instead |
1268 | of there. */ |
1269 | SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno); |
1270 | } |
1271 | |
1272 | /* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the |
1273 | best register more recently than REG2. */ |
1274 | static int reg_rename_tick[FIRST_PSEUDO_REGISTER]; |
1275 | |
1276 | /* Indicates the number of times renaming happened before the current one. */ |
1277 | static int reg_rename_this_tick; |
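
/* A standalone sketch (illustrative names, not GCC's API) of the
   least-recently-used policy these two counters implement:
   choose_best_reg_1 below prefers the candidate with the smallest tick,
   and the winner is then stamped with the current tick.  76 stands in
   for this target's FIRST_PSEUDO_REGISTER.  */

#include <array>
#include <vector>

static std::array<int, 76> lru_tick {};  /* last tick at which each reg won */
static int lru_this_tick = 1;            /* monotonically increasing counter */

static int
choose_least_recently_used (const std::vector<int> &candidates)
{
  int best = -1;
  for (int r : candidates)
    if (best < 0 || lru_tick[r] < lru_tick[best])
      best = r;                          /* keep the oldest winner so far */
  if (best >= 0)
    lru_tick[best] = lru_this_tick++;    /* the winner becomes most recent */
  return best;
}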
1278 | |
1279 | /* Choose, among the free registers, one that is suitable for storing |
1280 | the rhs value. |
1281 | |
1282 | ORIGINAL_INSNS is the list of insns where the operation (rhs) |
1283 | originally appears. There can be multiple original operations |
1284 | for a single rhs, since we are moving it up and merging along |
1285 | different paths. |
1286 | |
1287 | Some code is adapted from regrename.cc (regrename_optimize). |
1288 | If the original register is available, the function returns it. |
1289 | Otherwise it performs the checks, so that the new register |
1290 | complies with the following: |
1291 | - it should not violate any live ranges (such registers are in |
1292 | REG_RENAME_P->available_for_renaming set); |
1293 | - it should not be in the HARD_REGS_USED regset; |
1294 | - it should be in the class compatible with original uses; |
1295 | - it should not be clobbered through reference with different mode; |
1296 | - if we're in a leaf function, then the new register should |
1297 | not be in the LEAF_REGISTERS; |
1298 | - etc. |
1299 | |
1300 | If several registers meet the conditions, the register with the smallest |
1301 | tick is returned to achieve more even register allocation. |
1302 | |
1303 | If the original register seems to be ok, we set *IS_ORIG_REG_P_PTR to true. |
1304 | |
1305 | If no register satisfies the above conditions, NULL_RTX is returned. */ |
1306 | static rtx |
1307 | choose_best_reg_1 (HARD_REG_SET hard_regs_used, |
1308 | struct reg_rename *reg_rename_p, |
1309 | def_list_t original_insns, bool *is_orig_reg_p_ptr) |
1310 | { |
1311 | int best_new_reg; |
1312 | unsigned cur_reg; |
1313 | machine_mode mode = VOIDmode; |
1314 | unsigned regno, i, n; |
1315 | hard_reg_set_iterator hrsi; |
1316 | def_list_iterator di; |
1317 | def_t def; |
1318 | |
1319 | /* If the original register is available, return it. */ |
1320 | *is_orig_reg_p_ptr = true; |
1321 | |
1322 | FOR_EACH_DEF (def, di, original_insns) |
1323 | { |
1324 | rtx orig_dest = SET_DEST (PATTERN (def->orig_insn)); |
1325 | |
1326 | gcc_assert (REG_P (orig_dest)); |
1327 | |
1328 | /* Check that all original operations have the same mode. |
1329 | This is done for the next loop; if we returned from that |
1330 | loop early, we'd check only part of them, but in that case |
1331 | it doesn't matter. */ |
1332 | if (mode == VOIDmode) |
1333 | mode = GET_MODE (orig_dest); |
1334 | gcc_assert (mode == GET_MODE (orig_dest)); |
1335 | |
1336 | regno = REGNO (orig_dest); |
1337 | for (i = 0, n = REG_NREGS (orig_dest); i < n; i++) |
1338 | if (TEST_HARD_REG_BIT (hard_regs_used, regno + i)) |
1339 | break; |
1340 | |
1341 | /* All hard registers are available. */ |
1342 | if (i == n) |
1343 | { |
1344 | gcc_assert (mode != VOIDmode); |
1345 | |
1346 | /* Hard registers should not be shared. */ |
1347 | return gen_rtx_REG (mode, regno); |
1348 | } |
1349 | } |
1350 | |
1351 | *is_orig_reg_p_ptr = false; |
1352 | best_new_reg = -1; |
1353 | |
1354 | /* Among all available regs choose the register that was |
1355 | allocated earliest. */ |
1356 | EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming, |
1357 | 0, cur_reg, hrsi) |
1358 | if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg)) |
1359 | { |
1360 | /* Check that all hard regs for mode are available. */ |
1361 | for (i = 1, n = hard_regno_nregs (cur_reg, mode); i < n; i++) |
1362 | if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i) |
1363 | || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming, |
1364 | cur_reg + i)) |
1365 | break; |
1366 | |
1367 | if (i < n) |
1368 | continue; |
1369 | |
1370 | /* All hard registers are available. */ |
1371 | if (best_new_reg < 0 |
1372 | || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg]) |
1373 | { |
1374 | best_new_reg = cur_reg; |
1375 | |
1376 | /* Return immediately when we know there's no better reg. */ |
1377 | if (! reg_rename_tick[best_new_reg]) |
1378 | break; |
1379 | } |
1380 | } |
1381 | |
1382 | if (best_new_reg >= 0) |
1383 | { |
1384 | /* Use the check from the above loop. */ |
1385 | gcc_assert (mode != VOIDmode); |
1386 | return gen_rtx_REG (mode, best_new_reg); |
1387 | } |
1388 | |
1389 | return NULL_RTX; |
1390 | } |
1391 | |
1392 | /* A wrapper around choose_best_reg_1 () to verify that we make correct |
1393 | assumptions about available registers in the function. */ |
1394 | static rtx |
1395 | choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p, |
1396 | def_list_t original_insns, bool *is_orig_reg_p_ptr) |
1397 | { |
1398 | rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p, |
1399 | original_insns, is_orig_reg_p_ptr); |
1400 | |
1401 | /* FIXME loop over hard_regno_nregs here. */ |
1402 | gcc_assert (best_reg == NULL_RTX |
1403 | || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg))); |
1404 | |
1405 | return best_reg; |
1406 | } |
1407 | |
1408 | /* Choose the pseudo register for storing the rhs value. As this is supposed |
1409 | to work before reload, we return either the original register or create |
1410 | a new one. The parameters are the same as in the choose_best_reg_1 |
1411 | function, except that USED_REGS may contain pseudos. |
1412 | If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS. |
1413 | |
1414 | TODO: take into account register pressure while doing this. So far, |
1415 | this function has never returned NULL for pseudos, but we should |
1416 | not rely on this. */ |
1417 | static rtx |
1418 | choose_best_pseudo_reg (regset used_regs, |
1419 | struct reg_rename *reg_rename_p, |
1420 | def_list_t original_insns, bool *is_orig_reg_p_ptr) |
1421 | { |
1422 | def_list_iterator i; |
1423 | def_t def; |
1424 | machine_mode mode = VOIDmode; |
1425 | bool bad_hard_regs = false; |
1426 | |
1427 | /* We should not use this after reload. */ |
1428 | gcc_assert (!reload_completed); |
1429 | |
1430 | /* If the original register is available, return it. */ |
1431 | *is_orig_reg_p_ptr = true; |
1432 | |
1433 | FOR_EACH_DEF (def, i, original_insns) |
1434 | { |
1435 | rtx dest = SET_DEST (PATTERN (def->orig_insn)); |
1436 | int orig_regno; |
1437 | |
1438 | gcc_assert (REG_P (dest)); |
1439 | |
1440 | /* Check that all original operations have the same mode. */ |
1441 | if (mode == VOIDmode) |
1442 | mode = GET_MODE (dest); |
1443 | else |
1444 | gcc_assert (mode == GET_MODE (dest)); |
1445 | orig_regno = REGNO (dest); |
1446 | |
1447 | /* Check that nothing in used_regs intersects with orig_regno. When |
1448 | we have a hard reg here, still loop over hard_regno_nregs. */ |
1449 | if (HARD_REGISTER_NUM_P (orig_regno)) |
1450 | { |
1451 | int j, n; |
1452 | for (j = 0, n = REG_NREGS (dest); j < n; j++) |
1453 | if (REGNO_REG_SET_P (used_regs, orig_regno + j)) |
1454 | break; |
1455 | if (j < n) |
1456 | continue; |
1457 | } |
1458 | else |
1459 | { |
1460 | if (REGNO_REG_SET_P (used_regs, orig_regno)) |
1461 | continue; |
1462 | } |
1463 | if (HARD_REGISTER_NUM_P (orig_regno)) |
1464 | { |
1465 | gcc_assert (df_regs_ever_live_p (orig_regno)); |
1466 | |
1467 | /* For hard registers, we have to check hardware imposed |
1468 | limitations (frame/stack registers, calls crossed). */ |
1469 | if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, |
1470 | orig_regno)) |
1471 | { |
1472 | /* Don't let a register cross a call if it doesn't already |
1473 | cross one. This condition is written in accordance with |
1474 | that in sched-deps.cc sched_analyze_reg(). */ |
1475 | if (!reg_rename_p->crossed_call_abis |
1476 | || REG_N_CALLS_CROSSED (orig_regno) > 0) |
1477 | return gen_rtx_REG (mode, orig_regno); |
1478 | } |
1479 | |
1480 | bad_hard_regs = true; |
1481 | } |
1482 | else |
1483 | return dest; |
1484 | } |
1485 | |
1486 | *is_orig_reg_p_ptr = false; |
1487 | |
1488 | /* We had some original hard registers that couldn't be used. |
1489 | Those were likely special. Don't try to create a pseudo. */ |
1490 | if (bad_hard_regs) |
1491 | return NULL_RTX; |
1492 | |
1493 | /* We haven't found a register from original operations. Get a new one. |
1494 | FIXME: control register pressure somehow. */ |
1495 | { |
1496 | rtx new_reg = gen_reg_rtx (mode); |
1497 | |
1498 | gcc_assert (mode != VOIDmode); |
1499 | |
1500 | max_regno = max_reg_num (); |
1501 | maybe_extend_reg_info_p (); |
1502 | REG_N_CALLS_CROSSED (REGNO (new_reg)) |
1503 | = reg_rename_p->crossed_call_abis ? 1 : 0; |
1504 | |
1505 | return new_reg; |
1506 | } |
1507 | } |
1508 | |
1509 | /* Verify that the target of EXPR is available, judging by EXPR_TARGET_AVAILABLE, |
1510 | USED_REGS and REG_RENAME_P->UNAVAILABLE_HARD_REGS. */ |
1511 | static void |
1512 | verify_target_availability (expr_t expr, regset used_regs, |
1513 | struct reg_rename *reg_rename_p) |
1514 | { |
1515 | unsigned n, i, regno; |
1516 | machine_mode mode; |
1517 | bool target_available, live_available, hard_available; |
1518 | |
1519 | if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0) |
1520 | return; |
1521 | |
1522 | regno = expr_dest_regno (expr); |
1523 | mode = GET_MODE (EXPR_LHS (expr)); |
1524 | target_available = EXPR_TARGET_AVAILABLE (expr) == 1; |
1525 | n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs (regno, mode) : 1; |
1526 | |
1527 | live_available = hard_available = true; |
1528 | for (i = 0; i < n; i++) |
1529 | { |
1530 | if (bitmap_bit_p (used_regs, regno + i)) |
1531 | live_available = false; |
1532 | if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i)) |
1533 | hard_available = false; |
1534 | } |
1535 | |
1536 | /* When the target is not available, it may be due to hard register |
1537 | restrictions, e.g. it crosses calls, so we check hard_available too. */ |
1538 | if (target_available) |
1539 | gcc_assert (live_available); |
1540 | else |
1541 | /* Check only if we haven't scheduled something on the previous fence, |
1542 | because due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues |
1543 | and having more than one fence, we may end up having targ_un in a block |
1544 | in whose successors the target register is actually available. |
1545 | |
1546 | The last condition handles the case when a dependence from a call insn |
1547 | was created in sched-deps.cc for insns with destination registers that |
1548 | never crossed a call before, but do cross one after our code motion. |
1549 | |
1550 | FIXME: in the latter case, we just uselessly called find_used_regs, |
1551 | because we can't move this expression with any other register |
1552 | either. */ |
1553 | gcc_assert (scheduled_something_on_previous_fence || !live_available |
1554 | || !hard_available |
1555 | || (!reload_completed |
1556 | && reg_rename_p->crossed_call_abis |
1557 | && REG_N_CALLS_CROSSED (regno) == 0)); |
1558 | } |
1559 | |
1560 | /* Collect unavailable registers due to liveness for EXPR from BNDS |
1561 | into USED_REGS. Save additional information about available |
1562 | registers and registers unavailable due to hardware restrictions |
1563 | into the REG_RENAME_P structure. Save original insns into the ORIGINAL_INSNS |
1564 | list. */ |
1565 | static void |
1566 | collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs, |
1567 | struct reg_rename *reg_rename_p, |
1568 | def_list_t *original_insns) |
1569 | { |
1570 | for (; bnds; bnds = BLIST_NEXT (bnds)) |
1571 | { |
1572 | bool res; |
1573 | av_set_t orig_ops = NULL; |
1574 | bnd_t bnd = BLIST_BND (bnds); |
1575 | |
1576 | /* If the chosen best expr doesn't belong to the current boundary, |
1577 | skip it. */ |
1578 | if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr))) |
1579 | continue; |
1580 | |
1581 | /* Put in ORIG_OPS all exprs from this boundary that became |
1582 | RES on top. */ |
1583 | orig_ops = find_sequential_best_exprs (bnd, expr, false); |
1584 | |
1585 | /* Compute used regs and OR it into the USED_REGS. */ |
1586 | res = find_used_regs (BND_TO (bnd), orig_ops, used_regs, |
1587 | reg_rename_p, original_insns); |
1588 | |
1589 | /* FIXME: the assert is true until we have several boundaries. */ |
1590 | gcc_assert (res); |
1591 | av_set_clear (&orig_ops); |
1592 | } |
1593 | } |
1594 | |
1595 | /* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG. |
1596 | If BEST_REG is valid, replace LHS of EXPR with it. */ |
1597 | static bool |
1598 | try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr) |
1599 | { |
1600 | /* Try whether we'll be able to generate the insn |
1601 | 'dest := best_reg' at the place of the original operation. */ |
1602 | for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns)) |
1603 | { |
1604 | insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn; |
1605 | |
1606 | gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn))); |
1607 | |
1608 | if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn)) |
1609 | && (! replace_src_with_reg_ok_p (orig_insn, best_reg) |
1610 | || ! replace_dest_with_reg_ok_p (orig_insn, best_reg))) |
1611 | return false; |
1612 | } |
1613 | |
1614 | /* Make sure that EXPR has the right destination |
1615 | register. */ |
1616 | if (expr_dest_regno (expr) != REGNO (best_reg)) |
1617 | replace_dest_with_reg_in_expr (expr, best_reg); |
1618 | else |
1619 | EXPR_TARGET_AVAILABLE (expr) = 1; |
1620 | |
1621 | return true; |
1622 | } |
1623 | |
1624 | /* Select and assign best register to EXPR searching from BNDS. |
1625 | Set *IS_ORIG_REG_P to TRUE if original register was selected. |
1626 | Return FALSE if no register can be chosen, which could happen when: |
1627 | * EXPR_SEPARABLE_P is true but we were unable to find a suitable register; |
1628 | * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers |
1629 | that are used on the moving path. */ |
1630 | static bool |
1631 | find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p) |
1632 | { |
1633 | static struct reg_rename reg_rename_data; |
1634 | |
1635 | regset used_regs; |
1636 | def_list_t original_insns = NULL; |
1637 | bool reg_ok; |
1638 | |
1639 | *is_orig_reg_p = false; |
1640 | |
1641 | /* Don't bother to do anything if this insn doesn't set any registers. */ |
1642 | if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr))) |
1643 | && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)))) |
1644 | return true; |
1645 | |
1646 | used_regs = get_clear_regset_from_pool (); |
1647 | CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs); |
1648 | |
1649 | collect_unavailable_regs_from_bnds (expr, bnds, used_regs, &reg_rename_data, |
1650 | &original_insns); |
1651 | |
1652 | /* If after reload, make sure we're working with hard regs here. */ |
1653 | if (flag_checking && reload_completed) |
1654 | { |
1655 | reg_set_iterator rsi; |
1656 | unsigned i; |
1657 | |
1658 | EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi) |
1659 | gcc_unreachable (); |
1660 | } |
1661 | |
1662 | if (EXPR_SEPARABLE_P (expr)) |
1663 | { |
1664 | rtx best_reg = NULL_RTX; |
1665 | /* Check that we have computed availability of a target register |
1666 | correctly. */ |
1667 | verify_target_availability (expr, used_regs, &reg_rename_data); |
1668 | |
1669 | /* Turn everything into hard regs after reload. */ |
1670 | if (reload_completed) |
1671 | { |
1672 | HARD_REG_SET hard_regs_used; |
1673 | REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs); |
1674 | |
1675 | /* Join hard registers unavailable due to register class |
1676 | restrictions and live range intersection. */ |
1677 | hard_regs_used |= reg_rename_data.unavailable_hard_regs; |
1678 | |
1679 | best_reg = choose_best_reg (hard_regs_used, &reg_rename_data, |
1680 | original_insns, is_orig_reg_p); |
1681 | } |
1682 | else |
1683 | best_reg = choose_best_pseudo_reg (used_regs, &reg_rename_data, |
1684 | original_insns, is_orig_reg_p); |
1685 | |
1686 | if (!best_reg) |
1687 | reg_ok = false; |
1688 | else if (*is_orig_reg_p) |
1689 | { |
1690 | /* In case of unification BEST_REG may be different from EXPR's LHS |
1691 | when EXPR's LHS is unavailable, and there is another LHS among |
1692 | ORIGINAL_INSNS. */ |
1693 | reg_ok = try_replace_dest_reg (original_insns, best_reg, expr); |
1694 | } |
1695 | else |
1696 | { |
1697 | /* Forbid renaming of low-cost insns. */ |
1698 | if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2) |
1699 | reg_ok = false; |
1700 | else |
1701 | reg_ok = try_replace_dest_reg (original_insns, best_reg, expr); |
1702 | } |
1703 | } |
1704 | else |
1705 | { |
1706 | /* If !EXPR_SCHEDULE_AS_RHS (EXPR), just make sure INSN doesn't set |
1707 | any of the HARD_REGS_USED set. */ |
1708 | if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs, |
1709 | reg_rename_data.unavailable_hard_regs)) |
1710 | { |
1711 | reg_ok = false; |
1712 | gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0); |
1713 | } |
1714 | else |
1715 | { |
1716 | reg_ok = true; |
1717 | gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0); |
1718 | } |
1719 | } |
1720 | |
1721 | ilist_clear (&original_insns); |
1722 | return_regset_to_pool (used_regs); |
1723 | |
1724 | return reg_ok; |
1725 | } |
1726 | |
1727 | |
1728 | /* Return true if the dependence described by DS can be overcome. */ |
1729 | static bool |
1730 | can_speculate_dep_p (ds_t ds) |
1731 | { |
1732 | if (spec_info == NULL) |
1733 | return false; |
1734 | |
1735 | /* Leave only speculative data. */ |
1736 | ds &= SPECULATIVE; |
1737 | |
1738 | if (ds == 0) |
1739 | return false; |
1740 | |
1741 | { |
1742 | /* FIXME: make sched-deps.cc produce only those non-hard dependencies |
1743 | that we can overcome. */ |
1744 | ds_t spec_mask = spec_info->mask; |
1745 | |
1746 | if ((ds & spec_mask) != ds) |
1747 | return false; |
1748 | } |
1749 | |
1750 | if (ds_weak (ds) < spec_info->data_weakness_cutoff) |
1751 | return false; |
1752 | |
1753 | return true; |
1754 | } |
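
/* The cutoff test above acts as a probability filter: each speculative
   dependence carries a "weakness" estimating how likely the speculation
   is to succeed, and anything below the target's cutoff is rejected.
   A rough standalone model (illustrative names and numbers; the real
   ds_t packs per-type probabilities into bit fields):  */

struct dep_model
{
  int weakness;                  /* 0..100, higher = safer to speculate */
};

static const int model_weakness_cutoff = 80;  /* assumed target cutoff */

static bool
model_can_speculate_p (const dep_model &d)
{
  /* Mirrors the ds_weak (ds) < data_weakness_cutoff rejection above.  */
  return d.weakness >= model_weakness_cutoff;
}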
1755 | |
1756 | /* Get a speculation check instruction. |
1757 | C_EXPR is a speculative expression, |
1758 | CHECK_DS describes speculations that should be checked, |
1759 | ORIG_INSN is the original non-speculative insn in the stream. */ |
1760 | static insn_t |
1761 | create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn) |
1762 | { |
1763 | rtx check_pattern; |
1764 | rtx_insn *insn_rtx; |
1765 | insn_t insn; |
1766 | basic_block recovery_block; |
1767 | rtx_insn *label; |
1768 | |
1769 | /* Create a recovery block if the target is going to emit a branchy check, |
1770 | or if ORIG_INSN was speculative already. */ |
1771 | if (targetm.sched.needs_block_p (check_ds) |
1772 | || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0) |
1773 | { |
1774 | recovery_block = sel_create_recovery_block (orig_insn); |
1775 | label = BB_HEAD (recovery_block); |
1776 | } |
1777 | else |
1778 | { |
1779 | recovery_block = NULL; |
1780 | label = NULL; |
1781 | } |
1782 | |
1783 | /* Get pattern of the check. */ |
1784 | check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label, |
1785 | check_ds); |
1786 | |
1787 | gcc_assert (check_pattern != NULL); |
1788 | |
1789 | /* Emit check. */ |
1790 | insn_rtx = create_insn_rtx_from_pattern (check_pattern, label); |
1791 | |
1792 | insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn), |
1793 | INSN_SEQNO (orig_insn), orig_insn); |
1794 | |
1795 | /* Make the check non-speculative. */ |
1796 | EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0; |
1797 | INSN_SPEC_CHECKED_DS (insn) = check_ds; |
1798 | |
1799 | /* Decrease the priority of the check by the difference of the load/check |
1800 | instruction latencies. */ |
1801 | EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn)) |
1802 | - sel_vinsn_cost (INSN_VINSN (insn))); |
1803 | |
1804 | /* Emit copy of original insn (though with replaced target register, |
1805 | if needed) to the recovery block. */ |
1806 | if (recovery_block != NULL) |
1807 | { |
1808 | rtx twin_rtx; |
1809 | |
1810 | twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr))); |
1811 | twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX); |
1812 | sel_gen_recovery_insn_from_rtx_after (twin_rtx, |
1813 | INSN_EXPR (orig_insn), |
1814 | INSN_SEQNO (insn), |
1815 | bb_note (recovery_block)); |
1816 | } |
1817 | |
1818 | /* If we've generated a data speculation check, make sure |
1819 | that all the bookkeeping instructions we'll create during |
1820 | this move_op () will allocate an ALAT entry so that the |
1821 | check won't fail. |
1822 | In case of control speculation we must convert C_EXPR to control |
1823 | speculative mode, because failing to do so will result in an exception |
1824 | thrown by the non-control-speculative load. */ |
1825 | check_ds = ds_get_max_dep_weak (check_ds); |
1826 | speculate_expr (c_expr, check_ds); |
1827 | |
1828 | return insn; |
1829 | } |
1830 | |
1831 | /* True when INSN is a "regN = regN" copy. */ |
1832 | static bool |
1833 | identical_copy_p (rtx_insn *insn) |
1834 | { |
1835 | rtx lhs, rhs, pat; |
1836 | |
1837 | pat = PATTERN (insn); |
1838 | |
1839 | if (GET_CODE (pat) != SET) |
1840 | return false; |
1841 | |
1842 | lhs = SET_DEST (pat); |
1843 | if (!REG_P (lhs)) |
1844 | return false; |
1845 | |
1846 | rhs = SET_SRC (pat); |
1847 | if (!REG_P (rhs)) |
1848 | return false; |
1849 | |
1850 | return REGNO (lhs) == REGNO (rhs); |
1851 | } |
1852 | |
1853 | /* Undo all transformations on *AV_PTR that were done when |
1854 | moving through INSN. */ |
1855 | static void |
1856 | undo_transformations (av_set_t *av_ptr, rtx_insn *insn) |
1857 | { |
1858 | av_set_iterator av_iter; |
1859 | expr_t expr; |
1860 | av_set_t new_set = NULL; |
1861 | |
1862 | /* First, kill any EXPR that uses registers set by the insn. This is |
1863 | required for correctness. */ |
1864 | FOR_EACH_EXPR_1 (expr, av_iter, av_ptr) |
1865 | if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr)) |
1866 | && bitmap_intersect_p (INSN_REG_SETS (insn), |
1867 | VINSN_REG_USES (EXPR_VINSN (expr))) |
1868 | /* When an insn looks like 'r1 = r1', we could substitute through |
1869 | it, but the above condition will still hold. This happened with |
1870 | gcc.c-torture/execute/961125-1.c. */ |
1871 | && !identical_copy_p (insn)) |
1872 | { |
1873 | if (sched_verbose >= 6) |
1874 | sel_print ("Expr %d removed due to use/set conflict\n", |
1875 | INSN_UID (EXPR_INSN_RTX (expr))); |
1876 | av_set_iter_remove (&av_iter); |
1877 | } |
1878 | |
1879 | /* Undo transformations looking at the history vector. */ |
1880 | FOR_EACH_EXPR (expr, av_iter, *av_ptr) |
1881 | { |
1882 | int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr), |
1883 | insn, EXPR_VINSN (expr), true); |
1884 | |
1885 | if (index >= 0) |
1886 | { |
1887 | expr_history_def *phist; |
1888 | |
1889 | phist = &EXPR_HISTORY_OF_CHANGES (expr)[index]; |
1890 | |
1891 | switch (phist->type) |
1892 | { |
1893 | case TRANS_SPECULATION: |
1894 | { |
1895 | ds_t old_ds, new_ds; |
1896 | |
1897 | /* Compute the difference between old and new speculative |
1898 | statuses: that's what we need to check. |
1899 | Earlier we used to assert that the status will really |
1900 | change. This no longer works because only the probability |
1901 | bits in the status may have changed during compute_av_set, |
1902 | and in the case of merging different probabilities of the |
1903 | same speculative status along different paths we do not |
1904 | record this in the history vector. */ |
1905 | old_ds = phist->spec_ds; |
1906 | new_ds = EXPR_SPEC_DONE_DS (expr); |
1907 | |
1908 | old_ds &= SPECULATIVE; |
1909 | new_ds &= SPECULATIVE; |
1910 | new_ds &= ~old_ds; |
1911 | |
1912 | EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds; |
1913 | break; |
1914 | } |
1915 | case TRANS_SUBSTITUTION: |
1916 | { |
1917 | expr_def _tmp_expr, *tmp_expr = &_tmp_expr; |
1918 | vinsn_t new_vi; |
1919 | bool add = true; |
1920 | |
1921 | new_vi = phist->old_expr_vinsn; |
1922 | |
1923 | gcc_assert (VINSN_SEPARABLE_P (new_vi) |
1924 | == EXPR_SEPARABLE_P (expr)); |
1925 | copy_expr (tmp_expr, expr); |
1926 | |
1927 | if (vinsn_equal_p (phist->new_expr_vinsn, |
1928 | EXPR_VINSN (tmp_expr))) |
1929 | change_vinsn_in_expr (tmp_expr, new_vi); |
1930 | else |
1931 | /* This happens when we're unsubstituting on a bookkeeping |
1932 | copy, which was in turn substituted. The history is wrong |
1933 | in this case. Do it the hard way. */ |
1934 | add = substitute_reg_in_expr (tmp_expr, insn, true); |
1935 | if (add) |
1936 | av_set_add (&new_set, tmp_expr); |
1937 | clear_expr (tmp_expr); |
1938 | break; |
1939 | } |
1940 | default: |
1941 | gcc_unreachable (); |
1942 | } |
1943 | } |
1944 | |
1945 | } |
1946 | |
1947 | av_set_union_and_clear (av_ptr, &new_set, NULL); |
1948 | } |
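
/* For the TRANS_SUBSTITUTION arm above, undoing means re-installing the
   pre-substitution vinsn recorded in the history entry.  Illustration
   (same shape as Ex.1 in moveup_expr below): an expr that was turned
   from z = y*2 into z = x*2 while moving up through y = x is restored
   to z = y*2 once the motion over that insn is undone.  */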
1949 | |
1950 | |
1951 | /* Moveup_* helpers for code motion and computing av sets. */ |
1952 | |
1953 | /* Propagates EXPR inside an insn group through THROUGH_INSN. |
1954 | The difference from the below function is that only substitution is |
1955 | performed. */ |
1956 | static enum MOVEUP_EXPR_CODE |
1957 | moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn) |
1958 | { |
1959 | vinsn_t vi = EXPR_VINSN (expr); |
1960 | ds_t *has_dep_p; |
1961 | ds_t full_ds; |
1962 | |
1963 | /* Do this only inside insn group. */ |
1964 | gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0); |
1965 | |
1966 | full_ds = has_dependence_p (expr, through_insn, &has_dep_p); |
1967 | if (full_ds == 0) |
1968 | return MOVEUP_EXPR_SAME; |
1969 | |
1970 | /* Substitution is the possible choice in this case. */ |
1971 | if (has_dep_p[DEPS_IN_RHS]) |
1972 | { |
1973 | /* Can't substitute UNIQUE VINSNs. */ |
1974 | gcc_assert (!VINSN_UNIQUE_P (vi)); |
1975 | |
1976 | if (can_substitute_through_p (through_insn, |
1977 | has_dep_p[DEPS_IN_RHS]) |
1978 | && substitute_reg_in_expr (expr, through_insn, false)) |
1979 | { |
1980 | EXPR_WAS_SUBSTITUTED (expr) = true; |
1981 | return MOVEUP_EXPR_CHANGED; |
1982 | } |
1983 | |
1984 | /* Don't care about this, as even true dependencies may be allowed |
1985 | in an insn group. */ |
1986 | return MOVEUP_EXPR_SAME; |
1987 | } |
1988 | |
1989 | /* This can catch output dependencies in COND_EXECs. */ |
1990 | if (has_dep_p[DEPS_IN_INSN]) |
1991 | return MOVEUP_EXPR_NULL; |
1992 | |
1993 | /* This is either an output or an anti dependence, which usually have |
1994 | a zero latency. Allow this here; if we're wrong, tick_check_p |
1995 | will fix it. */ |
1996 | gcc_assert (has_dep_p[DEPS_IN_LHS])((void)(!(has_dep_p[DEPS_IN_LHS]) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sel-sched.cc" , 1996, __FUNCTION__), 0 : 0)); |
1997 | return MOVEUP_EXPR_AS_RHS; |
1998 | } |
1999 | |
2000 | /* True when a trapping EXPR cannot be moved through THROUGH_INSN. */ |
2001 | #define CANT_MOVE_TRAPPING(expr, through_insn) \ |
2002 | (VINSN_MAY_TRAP_P (EXPR_VINSN (expr)) \ |
2003 | && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \ |
2004 | && !sel_insn_is_speculation_check (through_insn)) |
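
/* For intuition, an illustrative situation the macro above rejects
   (pseudo-RTL, hypothetical):

     if (p == 0) goto skip;   <- through_insn: more than one successor
     r1 = [p];                <- expr may trap; CANT_MOVE_TRAPPING holds

   Hoisting the load above the guarding jump would let it fault on the
   p == 0 path; control speculation, when the target supports it, can
   still move such a load by emitting a check (see
   create_speculation_check above).  */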
2005 | |
2006 | /* True when a conflict on a target register was found during moveup_expr. */ |
2007 | static bool was_target_conflict = false; |
2008 | |
2009 | /* Return true when moving a debug INSN across THROUGH_INSN will |
2010 | create a bookkeeping block. We don't want to create such blocks, |
2011 | for they would cause codegen differences between compilations with |
2012 | and without debug info. */ |
2013 | |
2014 | static bool |
2015 | moving_insn_creates_bookkeeping_block_p (insn_t insn, |
2016 | insn_t through_insn) |
2017 | { |
2018 | basic_block bbi, bbt; |
2019 | edge e1, e2; |
2020 | edge_iterator ei1, ei2; |
2021 | |
2022 | if (!bookkeeping_can_be_created_if_moved_through_p (through_insn)) |
2023 | { |
2024 | if (sched_verbose >= 9) |
2025 | sel_print ("no bookkeeping required: "); |
2026 | return FALSE; |
2027 | } |
2028 | |
2029 | bbi = BLOCK_FOR_INSN (insn); |
2030 | |
2031 | if (EDGE_COUNT (bbi->preds) == 1) |
2032 | { |
2033 | if (sched_verbose >= 9) |
2034 | sel_print ("only one pred edge: "); |
2035 | return TRUE; |
2036 | } |
2037 | |
2038 | bbt = BLOCK_FOR_INSN (through_insn); |
2039 | |
2040 | FOR_EACH_EDGE (e1, ei1, bbt->succs) |
2041 | { |
2042 | FOR_EACH_EDGE (e2, ei2, bbi->preds) |
2043 | { |
2044 | if (find_block_for_bookkeeping (e1, e2, TRUE)) |
2045 | { |
2046 | if (sched_verbose >= 9) |
2047 | sel_print ("found existing block: "); |
2048 | return FALSE; |
2049 | } |
2050 | } |
2051 | } |
2052 | |
2053 | if (sched_verbose >= 9) |
2054 | sel_print ("would create bookkeeping block: "); |
2055 | |
2056 | return TRUE; |
2057 | } |
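
/* The CFG shape the function above tests for, schematically: bbi has a
   predecessor other than the path through bbt, so hoisting insn out of
   bbi leaves that path without it and requires a bookkeeping copy:

        bbt: ... through_insn      other_bb
                  \                 /
                  e1              e2
                    \             /
                     bbi: ... insn

   find_block_for_bookkeeping (e1, e2, true) only probes for an already
   existing block, so this predicate stays free of side effects.  */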
2058 | |
2059 | /* Return true when a conflict between EXPR and THROUGH_INSN is found |
2060 | because of implicit clobbers newly created by renaming. */ |
2061 | static bool |
2062 | implicit_clobber_conflict_p (insn_t through_insn, expr_t expr) |
2063 | { |
2064 | HARD_REG_SET temp; |
2065 | rtx_insn *insn; |
2066 | rtx reg, rhs, pat; |
2067 | hard_reg_set_iterator hrsi; |
2068 | unsigned regno; |
2069 | bool valid; |
2070 | |
2071 | /* Make a new pseudo register. */ |
2072 | reg = gen_reg_rtx (GET_MODE (EXPR_LHS (expr))); |
2073 | max_regno = max_reg_num (); |
2074 | maybe_extend_reg_info_p (); |
2075 | |
2076 | /* Validate a change and bail out early. */ |
2077 | insn = EXPR_INSN_RTX (expr); |
2078 | validate_change (insn, &SET_DEST (PATTERN (insn)), reg, true); |
2079 | valid = verify_changes (0); |
2080 | cancel_changes (0); |
2081 | if (!valid) |
2082 | { |
2083 | if (sched_verbose >= 6) |
2084 | sel_print ("implicit clobbers failed validation, "); |
2085 | return true; |
2086 | } |
2087 | |
2088 | /* Make a new insn with it. */ |
2089 | rhs = copy_rtx (VINSN_RHS (EXPR_VINSN (expr))); |
2090 | pat = gen_rtx_SET (reg, rhs); |
2091 | start_sequence (); |
2092 | insn = emit_insn (pat); |
2093 | end_sequence (); |
2094 | |
2095 | /* Calculate implicit clobbers. */ |
2096 | extract_insn (insn); |
2097 | preprocess_constraints (insn); |
2098 | alternative_mask preferred = get_preferred_alternatives (insn); |
2099 | ira_implicitly_set_insn_hard_regs (&temp, preferred); |
2100 | temp &= ~ira_no_alloc_regs; |
2101 | |
2102 | /* If any implicit clobber registers intersect with regular ones in |
2103 | through_insn, we have a dependency and thus bail out. */ |
2104 | EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi) |
2105 | { |
2106 | vinsn_t vi = INSN_VINSN (through_insn); |
2107 | if (bitmap_bit_p (VINSN_REG_SETS (vi), regno) |
2108 | || bitmap_bit_p (VINSN_REG_CLOBBERS (vi), regno) |
2109 | || bitmap_bit_p (VINSN_REG_USES (vi), regno)) |
2110 | return true; |
2111 | } |
2112 | |
2113 | return false; |
2114 | } |
2115 | |
2116 | /* Modifies EXPR so it can be moved through the THROUGH_INSN, |
2117 | performing necessary transformations. Record the type of transformation |
2118 | made in PTRANS_TYPE, when it is not NULL. When INSIDE_INSN_GROUP, |
2119 | permit all dependencies except true ones, and try to remove those |
2120 | too via forward substitution. All cases when a non-eliminable |
2121 | non-zero cost dependency exists inside an insn group will be fixed |
2122 | in tick_check_p instead. */ |
2123 | static enum MOVEUP_EXPR_CODE |
2124 | moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group, |
2125 | enum local_trans_type *ptrans_type) |
2126 | { |
2127 | vinsn_t vi = EXPR_VINSN (expr); |
2128 | insn_t insn = VINSN_INSN_RTX (vi); |
2129 | bool was_changed = false; |
2130 | bool as_rhs = false; |
2131 | ds_t *has_dep_p; |
2132 | ds_t full_ds; |
2133 | |
2134 | /* ??? We use dependencies of non-debug insns on debug insns to |
2135 | indicate that the debug insns need to be reset if the non-debug |
2136 | insn is pulled ahead of it. It's hard to figure out how to |
2137 | introduce such a notion in sel-sched, but it already fails to |
2138 | support debug insns in other ways, so we just go ahead and |
2139 | let the debug insns go corrupt for now. */ |
2140 | if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn)) |
2141 | return MOVEUP_EXPR_SAME; |
2142 | |
2143 | /* When inside_insn_group, delegate to the helper. */ |
2144 | if (inside_insn_group) |
2145 | return moveup_expr_inside_insn_group (expr, through_insn); |
2146 | |
2147 | /* Deal with unique insns and control dependencies. */ |
2148 | if (VINSN_UNIQUE_P (vi)) |
2149 | { |
2150 | /* We can move jumps without side-effects or jumps that are |
2151 | mutually exclusive with instruction THROUGH_INSN (in all cases where |
2152 | dependencies allow us to do so and the jump is not speculative). */ |
2153 | if (control_flow_insn_p (insn)) |
2154 | { |
2155 | basic_block fallthru_bb; |
2156 | |
2157 | /* Do not move checks and do not move jumps through other |
2158 | jumps. */ |
2159 | if (control_flow_insn_p (through_insn) |
2160 | || sel_insn_is_speculation_check (insn)) |
2161 | return MOVEUP_EXPR_NULL; |
2162 | |
2163 | /* Don't move jumps through CFG joins. */ |
2164 | if (bookkeeping_can_be_created_if_moved_through_p (through_insn)) |
2165 | return MOVEUP_EXPR_NULL; |
2166 | |
2167 | /* The jump should have a clear fallthru block, and |
2168 | this block should be in the current region. */ |
2169 | if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL |
2170 | || ! in_current_region_p (fallthru_bb)) |
2171 | return MOVEUP_EXPR_NULL; |
2172 | |
2173 | /* And it should be mutually exclusive with through_insn. */ |
2174 | if (! sched_insns_conditions_mutex_p (insn, through_insn) |
2175 | && ! DEBUG_INSN_P (through_insn)) |
2176 | return MOVEUP_EXPR_NULL; |
2177 | } |
2178 | |
2179 | /* Don't move what we can't move. */ |
2180 | if (EXPR_CANT_MOVE (expr) |
2181 | && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)) |
2182 | return MOVEUP_EXPR_NULL; |
2183 | |
2184 | /* Don't move SCHED_GROUP instruction through anything. |
2185 | If we don't force this, then it will be possible to start |
2186 | scheduling a sched_group before all its dependencies are |
2187 | resolved. |
2188 | ??? Haifa deals with this issue by delaying the SCHED_GROUP |
2189 | as late as possible through rank_for_schedule. */ |
2190 | if (SCHED_GROUP_P (insn)) |
2191 | return MOVEUP_EXPR_NULL; |
2192 | } |
2193 | else |
2194 | gcc_assert (!control_flow_insn_p (insn)); |
2195 | |
2196 | /* Don't move debug insns if this would require bookkeeping. */ |
2197 | if (DEBUG_INSN_P (insn) |
2198 | && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn) |
2199 | && moving_insn_creates_bookkeeping_block_p (insn, through_insn)) |
2200 | return MOVEUP_EXPR_NULL; |
2201 | |
2202 | /* Deal with data dependencies. */ |
2203 | was_target_conflict = false; |
2204 | full_ds = has_dependence_p (expr, through_insn, &has_dep_p); |
2205 | if (full_ds == 0) |
2206 | { |
2207 | if (!CANT_MOVE_TRAPPING (expr, through_insn)) |
2208 | return MOVEUP_EXPR_SAME; |
2209 | } |
2210 | else |
2211 | { |
2212 | /* We can move UNIQUE insn up only as a whole and unchanged, |
2213 | so it shouldn't have any dependencies. */ |
2214 | if (VINSN_UNIQUE_P (vi)) |
2215 | return MOVEUP_EXPR_NULL; |
2216 | } |
2217 | |
2218 | if (full_ds != 0 && can_speculate_dep_p (full_ds)) |
2219 | { |
2220 | int res; |
2221 | |
2222 | res = speculate_expr (expr, full_ds); |
2223 | if (res >= 0) |
2224 | { |
2225 | /* Speculation was successful. */ |
2226 | full_ds = 0; |
2227 | was_changed = (res > 0); |
2228 | if (res == 2) |
2229 | was_target_conflict = true; |
2230 | if (ptrans_type) |
2231 | *ptrans_type = TRANS_SPECULATION; |
2232 | sel_clear_has_dependence (); |
2233 | } |
2234 | } |
2235 | |
2236 | if (has_dep_p[DEPS_IN_INSN]) |
2237 | /* We have some dependency that cannot be discarded. */ |
2238 | return MOVEUP_EXPR_NULL; |
2239 | |
2240 | if (has_dep_p[DEPS_IN_LHS]) |
2241 | { |
2242 | /* Only separable insns can be moved up with the new register. |
2243 | Anyway, we should mark that the original register is |
2244 | unavailable. */ |
2245 | if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr)) |
2246 | return MOVEUP_EXPR_NULL; |
2247 | |
2248 | /* When renaming a hard register to a pseudo before reload, extra |
2249 | dependencies can occur from the implicit clobbers of the insn. |
2250 | Filter out such cases here. */ |
2251 | if (!reload_completed && REG_P (EXPR_LHS (expr)) |
2252 | && HARD_REGISTER_P (EXPR_LHS (expr)) |
2253 | && implicit_clobber_conflict_p (through_insn, expr)) |
2254 | { |
2255 | if (sched_verbose >= 6) |
2256 | sel_print ("implicit clobbers conflict detected, "); |
2257 | return MOVEUP_EXPR_NULL; |
2258 | } |
2259 | EXPR_TARGET_AVAILABLE (expr) = false; |
2260 | was_target_conflict = true; |
2261 | as_rhs = true; |
2262 | } |
2263 | |
2264 | /* At this point we have either separable insns, that will be lifted |
2265 | up only as RHSes, or non-separable insns with no dependency in lhs. |
2266 | If dependency is in RHS, then try to perform substitution and move up |
2267 | substituted RHS: |
2268 | |
2269 | Ex. 1: Ex.2 |
2270 | y = x; y = x; |
2271 | z = y*2; y = y*2; |
2272 | |
2273 | In Ex.1 y*2 can be replaced with x*2 and the whole operation can be |
2274 | moved above the y=x assignment as z=x*2. |
2275 | |
2276 | In Ex.2 y*2 also can be replaced with x*2, but only the right hand |
2277 | side can be moved because of the output dependency. The operation was |
2278 | cropped to its rhs above. */ |
2279 | if (has_dep_p[DEPS_IN_RHS]) |
2280 | { |
2281 | ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS]; |
2282 | |
2283 | /* Can't substitute UNIQUE VINSNs. */ |
2284 | gcc_assert (!VINSN_UNIQUE_P (vi)); |
2285 | |
2286 | if (can_speculate_dep_p (*rhs_dsp)) |
2287 | { |
2288 | int res; |
2289 | |
2290 | res = speculate_expr (expr, *rhs_dsp); |
2291 | if (res >= 0) |
2292 | { |
2293 | /* Speculation was successful. */ |
2294 | *rhs_dsp = 0; |
2295 | was_changed = (res > 0); |
2296 | if (res == 2) |
2297 | was_target_conflict = true; |
2298 | if (ptrans_type) |
2299 | *ptrans_type = TRANS_SPECULATION; |
2300 | } |
2301 | else |
2302 | return MOVEUP_EXPR_NULL; |
2303 | } |
2304 | else if (can_substitute_through_p (through_insn, |
2305 | *rhs_dsp) |
2306 | && substitute_reg_in_expr (expr, through_insn, false)) |
2307 | { |
2308 | /* ??? We cannot perform substitution AND speculation on the same |
2309 | insn. */ |
2310 | gcc_assert (!was_changed); |
2311 | was_changed = true; |
2312 | if (ptrans_type) |
2313 | *ptrans_type = TRANS_SUBSTITUTION; |
2314 | EXPR_WAS_SUBSTITUTED (expr) = true; |
2315 | } |
2316 | else |
2317 | return MOVEUP_EXPR_NULL; |
2318 | } |
2319 | |
2320 | /* Don't move trapping insns through jumps. |
2321 | This check should be at the end to give a chance to control speculation |
2322 | to perform its duties. */ |
2323 | if (CANT_MOVE_TRAPPING (expr, through_insn)) |
2324 | return MOVEUP_EXPR_NULL; |
2325 | |
2326 | return (was_changed |
2327 | ? MOVEUP_EXPR_CHANGED |
2328 | : (as_rhs |
2329 | ? MOVEUP_EXPR_AS_RHS |
2330 | : MOVEUP_EXPR_SAME)); |
2331 | } |
2332 | |
2333 | /* Try to look at bitmap caches for EXPR and INSN pair, return true |
2334 | if successful. When INSIDE_INSN_GROUP, also try to ignore dependencies |
2335 | that can exist within a parallel group. Write to RES the resulting |
2336 | code for moveup_expr. */ |
2337 | static bool |
2338 | try_bitmap_cache (expr_t expr, insn_t insn, |
2339 | bool inside_insn_group, |
2340 | enum MOVEUP_EXPR_CODE *res) |
2341 | { |
2342 | int expr_uid = INSN_UID (EXPR_INSN_RTX (expr)); |
2343 | |
2344 | /* First check whether we've analyzed this situation already. */ |
2345 | if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid)) |
2346 | { |
2347 | if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid)) |
2348 | { |
2349 | if (sched_verbose >= 6) |
2350 | sel_print ("removed (cached)\n"); |
2351 | *res = MOVEUP_EXPR_NULL; |
2352 | return true; |
2353 | } |
2354 | else |
2355 | { |
2356 | if (sched_verbose >= 6) |
2357 | sel_print ("unchanged (cached)\n"); |
2358 | *res = MOVEUP_EXPR_SAME; |
2359 | return true; |
2360 | } |
2361 | } |
2362 | else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid)) |
2363 | { |
2364 | if (inside_insn_group) |
2365 | { |
2366 | if (sched_verbose >= 6) |
2367 | sel_print ("unchanged (as RHS, cached, inside insn group)\n"); |
2368 | *res = MOVEUP_EXPR_SAME; |
2369 | return true; |
2370 | |
2371 | } |
2372 | else |
2373 | EXPR_TARGET_AVAILABLE (expr) = false; |
2374 | |
2375 | /* This is the only case when propagation result can change over time, |
2376 | as we can dynamically switch off scheduling as RHS. In this case, |
2377 | just check the flag to reach the correct decision. */ |
2378 | if (enable_schedule_as_rhs_p) |
2379 | { |
2380 | if (sched_verbose >= 6) |
2381 | sel_print ("unchanged (as RHS, cached)\n"); |
2382 | *res = MOVEUP_EXPR_AS_RHS; |
2383 | return true; |
2384 | } |
2385 | else |
2386 | { |
2387 | if (sched_verbose >= 6) |
2388 | sel_print ("removed (cached as RHS, but renaming" |
2389 | " is now disabled)\n"); |
2390 | *res = MOVEUP_EXPR_NULL; |
2391 | return true; |
2392 | } |
2393 | } |
2394 | |
2395 | return false; |
2396 | } |
2397 | |
2398 | /* Try to look at the transformation hashtable for the EXPR and INSN pair, |
2399 | return true if successful. Write to RES the resulting code for moveup_expr. */ |
2400 | static bool |
2401 | try_transformation_cache (expr_t expr, insn_t insn, |
2402 | enum MOVEUP_EXPR_CODE *res) |
2403 | { |
2404 | struct transformed_insns *pti |
2405 | = (struct transformed_insns *) |
2406 | htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn), |
2407 | &EXPR_VINSN (expr), |
2408 | VINSN_HASH_RTX (EXPR_VINSN (expr))); |
2409 | if (pti) |
2410 | { |
2411 | /* This EXPR was already moved through this insn and was |
2412 | changed as a result. Fetch the proper data from |
2413 | the hashtable. */ |
2414 | insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr)((expr)->history_of_changes), |
2415 | INSN_UID (insn), pti->type, |
2416 | pti->vinsn_old, pti->vinsn_new, |
2417 | EXPR_SPEC_DONE_DS (expr)((expr)->spec_done_ds)); |
2418 | |
2419 | if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new))(PREV_INSN (((pti->vinsn_new)->insn_rtx)) && NEXT_INSN (((pti->vinsn_new)->insn_rtx)))) |
2420 | pti->vinsn_new = vinsn_copy (pti->vinsn_new, true); |
2421 | change_vinsn_in_expr (expr, pti->vinsn_new); |
2422 | if (pti->was_target_conflict) |
2423 | EXPR_TARGET_AVAILABLE (expr)((expr)->target_available) = false; |
2424 | if (pti->type == TRANS_SPECULATION) |
2425 | { |
2426 | EXPR_SPEC_DONE_DS (expr)((expr)->spec_done_ds) = pti->ds; |
2427 | EXPR_NEEDS_SPEC_CHECK_P (expr)((expr)->needs_spec_check_p) |= pti->needs_check; |
2428 | } |
2429 | |
2430 | if (sched_verbose >= 6) |
2431 | { |
2432 | sel_print ("changed (cached): "); |
2433 | dump_expr (expr); |
2434 | sel_print ("\n"); |
2435 | } |
2436 | |
2437 | *res = MOVEUP_EXPR_CHANGED; |
2438 | return true; |
2439 | } |
2440 | |
2441 | return false; |
2442 | } |

/* Update bitmap caches on INSN with result RES of propagating EXPR.  */
static void
update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
                     enum MOVEUP_EXPR_CODE res)
{
  int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));

  /* Do not cache result of propagating jumps through an insn group,
     as it is always true, which is not useful outside the group.  */
  if (inside_insn_group)
    return;

  if (res == MOVEUP_EXPR_NULL)
    {
      bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
      bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
    }
  else if (res == MOVEUP_EXPR_SAME)
    {
      bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
      bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid);
    }
  else if (res == MOVEUP_EXPR_AS_RHS)
    {
      bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
      bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
    }
  else
    gcc_unreachable ();
}

/* Update the hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN
   and transformation type TRANS_TYPE.  */
static void
update_transformation_cache (expr_t expr, insn_t insn,
                             bool inside_insn_group,
                             enum local_trans_type trans_type,
                             vinsn_t expr_old_vinsn)
{
  struct transformed_insns *pti;

  if (inside_insn_group)
    return;

  pti = XNEW (struct transformed_insns);
  pti->vinsn_old = expr_old_vinsn;
  pti->vinsn_new = EXPR_VINSN (expr);
  pti->type = trans_type;
  pti->was_target_conflict = was_target_conflict;
  pti->ds = EXPR_SPEC_DONE_DS (expr);
  pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr);
  vinsn_attach (pti->vinsn_old);
  vinsn_attach (pti->vinsn_new);
  *((struct transformed_insns **)
    htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn),
                              pti, VINSN_HASH_RTX (expr_old_vinsn),
                              INSERT)) = pti;
}
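
/* Both vinsns stored in the entry are reference-counted via vinsn_attach,
   and the entry is keyed by the hash of the original vinsn's rtx, the same
   hash try_transformation_cache uses for its lookup.  */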

/* Same as moveup_expr, but first looks up the result of the
   transformation in the caches.  */
static enum MOVEUP_EXPR_CODE
moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
{
  enum MOVEUP_EXPR_CODE res;
  bool got_answer = false;

  if (sched_verbose >= 6)
    {
      sel_print ("Moving ");
      dump_expr (expr);
      sel_print (" through %d: ", INSN_UID (insn));
    }

  if (DEBUG_INSN_P (EXPR_INSN_RTX (expr))
      && BLOCK_FOR_INSN (EXPR_INSN_RTX (expr))
      && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr)))
          == EXPR_INSN_RTX (expr)))
    /* Don't use cached information for debug insns that are heads of
       basic blocks.  */;
  else if (try_bitmap_cache (expr, insn, inside_insn_group, &res))
    /* When inside an insn group, we do not want to remove stores conflicting
       with previously issued loads.  */
    got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL;
  else if (try_transformation_cache (expr, insn, &res))
    got_answer = true;

  if (! got_answer)
    {
      /* Invoke moveup_expr and record the results.  */
      vinsn_t expr_old_vinsn = EXPR_VINSN (expr);
      ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr);
      int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn));
      bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn);
      enum local_trans_type trans_type = TRANS_SUBSTITUTION;

      /* ??? Invent something better than this.  We can't allow old_vinsn
         to go, we need it for the history vector.  */
      vinsn_attach (expr_old_vinsn);

      res = moveup_expr (expr, insn, inside_insn_group,
                         &trans_type);
      switch (res)
        {
        case MOVEUP_EXPR_NULL:
          update_bitmap_cache (expr, insn, inside_insn_group, res);
          if (sched_verbose >= 6)
            sel_print ("removed\n");
          break;

        case MOVEUP_EXPR_SAME:
          update_bitmap_cache (expr, insn, inside_insn_group, res);
          if (sched_verbose >= 6)
            sel_print ("unchanged\n");
          break;

        case MOVEUP_EXPR_AS_RHS:
          gcc_assert (!unique_p || inside_insn_group);
          update_bitmap_cache (expr, insn, inside_insn_group, res);
          if (sched_verbose >= 6)
            sel_print ("unchanged (as RHS)\n");
          break;

        case MOVEUP_EXPR_CHANGED:
          gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid
                      || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds);
          insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
                                  INSN_UID (insn), trans_type,
                                  expr_old_vinsn, EXPR_VINSN (expr),
                                  expr_old_spec_ds);
          update_transformation_cache (expr, insn, inside_insn_group,
                                       trans_type, expr_old_vinsn);
          if (sched_verbose >= 6)
            {
              sel_print ("changed: ");
              dump_expr (expr);
              sel_print ("\n");
            }
          break;
        default:
          gcc_unreachable ();
        }

      vinsn_detach (expr_old_vinsn);
    }

  return res;
}
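
/* So a cache miss costs one full moveup_expr call, after which the outcome
   is remembered either in the bitmap cache (NULL/SAME/AS_RHS results) or
   in the transformation hashtable (CHANGED results, together with the old
   and new vinsns and the speculation status).  */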

/* Moves an av set AVP up through INSN, performing necessary
   transformations.  */
static void
moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
{
  av_set_iterator i;
  expr_t expr;

  FOR_EACH_EXPR_1 (expr, i, avp)
    {
      switch (moveup_expr_cached (expr, insn, inside_insn_group))
        {
        case MOVEUP_EXPR_SAME:
        case MOVEUP_EXPR_AS_RHS:
          break;

        case MOVEUP_EXPR_NULL:
          av_set_iter_remove (&i);
          break;

        case MOVEUP_EXPR_CHANGED:
          expr = merge_with_other_exprs (avp, &i, expr);
          break;

        default:
          gcc_unreachable ();
        }
    }
}

/* Moves the AVP set along PATH.  */
static void
moveup_set_inside_insn_group (av_set_t *avp, ilist_t path)
{
  int last_cycle;

  if (sched_verbose >= 6)
    sel_print ("Moving expressions up in the insn group...\n");
  if (! path)
    return;
  last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path));
  while (path
         && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
    {
      moveup_set_expr (avp, ILIST_INSN (path), true);
      path = ILIST_NEXT (path);
    }
}

/* Returns true if, after moving EXPR along PATH, it equals EXPR_VLIW.  */
static bool
equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
{
  expr_def _tmp, *tmp = &_tmp;
  int last_cycle;
  bool res = true;

  copy_expr_onside (tmp, expr);
  last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0;
  while (path
         && res
         && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
    {
      res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
             != MOVEUP_EXPR_NULL);
      path = ILIST_NEXT (path);
    }

  if (res)
    {
      vinsn_t tmp_vinsn = EXPR_VINSN (tmp);
      vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw);

      if (tmp_vinsn != expr_vliw_vinsn)
        res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn);
    }

  clear_expr (tmp);
  return res;
}


/* Functions that compute av and lv sets.  */

/* Returns true if INSN is not a downward continuation of the given path P in
   the current stage.  */
static bool
is_ineligible_successor (insn_t insn, ilist_t p)
{
  insn_t prev_insn;

  /* Check that INSN has not been deleted.  */
  if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
    gcc_unreachable ();
  else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
    gcc_unreachable ();

  /* If it's the first insn visited, then the successor is ok.  */
  if (!p)
    return false;

  prev_insn = ILIST_INSN (p);

  if (/* a backward edge.  */
      INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
      /* is already visited.  */
      || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
          && (ilist_is_in_p (p, insn)
              /* We can reach another fence here and still the seqno of insn
                 would be equal to the seqno of prev_insn.  This is possible
                 when prev_insn is a previously created bookkeeping copy.
                 In that case it'd get the seqno of insn.  Thus, check here
                 whether insn is in the current fence too.  */
              || IN_CURRENT_FENCE_P (insn)))
      /* Was already scheduled on this round.  */
      || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
          && IN_CURRENT_FENCE_P (insn))
      /* An insn from another fence could also be
         scheduled earlier even if this insn is not in
         a fence list right now.  Check INSN_SCHED_CYCLE instead.  */
      || (!pipelining_p
          && INSN_SCHED_TIMES (insn) > 0))
    return true;
  else
    return false;
}
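
/* In short, a successor is rejected when following it would traverse a
   backward edge, revisit an insn already on the current path or in the
   current fence, or (when not pipelining) revisit an insn that was already
   scheduled on this round; the av_set computation below relies on this
   predicate to cut off its downward traversal.  */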

/* Computes the av_set below the last bb insn INSN, doing all the 'dirty
   work' of handling multiple successors and properly merging their av_sets.
   P is the current path traversed.  WS is the size of the lookahead window.
   Return the av set computed.  */
static av_set_t
compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
{
  struct succs_info *sinfo;
  av_set_t expr_in_all_succ_branches = NULL;
  int is;
  insn_t succ, zero_succ = NULL;
  av_set_t av1 = NULL;

  gcc_assert (sel_bb_end_p (insn));

  /* Find the different kinds of successors needed for correct computing of
     SPEC and TARGET_AVAILABLE attributes.  */
  sinfo = compute_succs_info (insn, SUCCS_NORMAL);

  /* Debug output.  */
  if (sched_verbose >= 6)
    {
      sel_print ("successors of bb end (%d): ", INSN_UID (insn));
      dump_insn_vector (sinfo->succs_ok);
      sel_print ("\n");
      if (sinfo->succs_ok_n != sinfo->all_succs_n)
        sel_print ("real successors num: %d\n", sinfo->all_succs_n);
    }

  /* Add insn to the tail of the current path.  */
  ilist_add (&p, insn);

  FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
    {
      av_set_t succ_set;

      /* We will edit SUCC_SET and the EXPR_SPEC field of its elements.  */
      succ_set = compute_av_set_inside_bb (succ, p, ws, true);

      av_set_split_usefulness (succ_set,
                               sinfo->probs_ok[is],
                               sinfo->all_prob);

      if (sinfo->all_succs_n > 1)
        {
          /* Find EXPRs that came from *all* successors and save them
             into expr_in_all_succ_branches.  This set will be used later
             for calculating speculation attributes of EXPRs.  */
          if (is == 0)
            {
              expr_in_all_succ_branches = av_set_copy (succ_set);

              /* Remember the first successor for later.  */
              zero_succ = succ;
            }
          else
            {
              av_set_iterator i;
              expr_t expr;

              FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches)
                if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr)))
                  av_set_iter_remove (&i);
            }
        }

      /* Union the av_sets.  Check liveness restrictions on target registers
         in the special case of two successors.  */
      if (sinfo->succs_ok_n == 2 && is == 1)
        {
          basic_block bb0 = BLOCK_FOR_INSN (zero_succ);
          basic_block bb1 = BLOCK_FOR_INSN (succ);

          gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1));
          av_set_union_and_live (&av1, &succ_set,
                                 BB_LV_SET (bb0),
                                 BB_LV_SET (bb1),
                                 insn);
        }
      else
        av_set_union_and_clear (&av1, &succ_set, insn);
    }

  /* Check liveness restrictions the hard way when there are more than
     two successors.  */
  if (sinfo->succs_ok_n > 2)
    FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
      {
        basic_block succ_bb = BLOCK_FOR_INSN (succ);
        av_set_t av_succ = (is_ineligible_successor (succ, p)
                            ? NULL
                            : BB_AV_SET (succ_bb));

        gcc_assert (BB_LV_SET_VALID_P (succ_bb));
        mark_unavailable_targets (av1, av_succ, BB_LV_SET (succ_bb));
      }

  /* Finally, check liveness restrictions on paths leaving the region.  */
  if (sinfo->all_succs_n > sinfo->succs_ok_n)
    FOR_EACH_VEC_ELT (sinfo->succs_other, is, succ)
      mark_unavailable_targets
        (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));

  if (sinfo->all_succs_n > 1)
    {
      av_set_iterator i;
      expr_t expr;

      /* Increase the spec attribute of all EXPRs that didn't come
         from all successors.  */
      FOR_EACH_EXPR (expr, i, av1)
        if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr)))
          EXPR_SPEC (expr)++;

      av_set_clear (&expr_in_all_succ_branches);

      /* Do not move conditional branches through other
         conditional branches.  So, remove all conditional
         branches from the av_set if the current operator is a conditional
         branch.  */
      av_set_substract_cond_branches (&av1);
    }

  ilist_remove (&p);
  free_succs_info (sinfo);

  if (sched_verbose >= 6)
    {
      sel_print ("av_succs (%d): ", INSN_UID (insn));
      dump_av_set (av1);
      sel_print ("\n");
    }

  return av1;
}
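
/* To recap the merging logic above: with exactly two eligible successors
   the union is liveness-checked directly by av_set_union_and_live; with
   more than two, the union is built first and then filtered against each
   successor by mark_unavailable_targets; and every expression that was
   missing from at least one branch gets its EXPR_SPEC counter increased,
   marking it as speculative at this program point.  */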

/* This function computes the av_set for FIRST_INSN by dragging a valid
   av_set through all basic block insns either from the end of the basic
   block (computed using compute_av_set_at_bb_end) or from the insn on which
   MAX_WS was exceeded.  It uses compute_av_set_at_bb_end to compute the
   av_set below the basic block and to handle conditional branches.
   FIRST_INSN - the basic block head, P - path consisting of the insns
   traversed on the way to FIRST_INSN (the path is sparse, only bb heads
   and bb ends are added to the path), WS - current window size,
   NEED_COPY_P - true if we'll make a copy of the av_set before returning
   it.  */
static av_set_t
compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
                          bool need_copy_p)
{
  insn_t cur_insn;
  int end_ws = ws;
  insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn));
  insn_t after_bb_end = NEXT_INSN (bb_end);
  insn_t last_insn;
  av_set_t av = NULL;
  basic_block cur_bb = BLOCK_FOR_INSN (first_insn);

  /* Return NULL if insn is not on the legitimate downward path.  */
  if (is_ineligible_successor (first_insn, p))
    {
      if (sched_verbose >= 6)
        sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn));

      return NULL;
    }

  /* If insn already has a valid av(insn) computed, just return it.  */
  if (AV_SET_VALID_P (first_insn))
    {
      av_set_t av_set;

      if (sel_bb_head_p (first_insn))
        av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn));
      else
        av_set = NULL;

      if (sched_verbose >= 6)
        {
          sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn));
          dump_av_set (av_set);
          sel_print ("\n");
        }

      return need_copy_p ? av_set_copy (av_set) : av_set;
    }

  ilist_add (&p, first_insn);

  /* After this loop completes, LAST_INSN will hold the insn from which to
     start the backward computation: either an insn whose av_set is already
     valid (possibly the empty one recorded where the window size was
     exceeded), or AFTER_BB_END, in which case the av_set below the block
     is computed by compute_av_set_at_bb_end.  */
  for (last_insn = first_insn; last_insn != after_bb_end;
       last_insn = NEXT_INSN (last_insn))
    {
      /* We may encounter a valid av_set not only on the bb head, but also on
         those insns on which MAX_WS was previously exceeded.  */
      if (AV_SET_VALID_P (last_insn))
        {
          if (sched_verbose >= 6)
            sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn));
          break;
        }

      /* The special case: the last insn of the BB may be an
         ineligible_successor due to its seqno that was set on
         it as bookkeeping.  */
      if (last_insn != first_insn
          && is_ineligible_successor (last_insn, p))
        {
          if (sched_verbose >= 6)
            sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn));
          break;
        }

      if (DEBUG_INSN_P (last_insn))
        continue;

      if (end_ws > max_ws)
        {
          /* We can reach the max lookahead size at the bb header, so clean
             the av_set first.  */
          INSN_WS_LEVEL (last_insn) = global_level;

          if (sched_verbose >= 6)
            sel_print ("Insn %d is beyond the software lookahead window size\n",
                       INSN_UID (last_insn));
          break;
        }

      end_ws++;
    }

  /* Get the valid av_set into AV above the LAST_INSN to start the backward
     computation from.  It will either be an empty av_set or the av_set
     computed from the successors of the last insn of the current bb.  */
  if (last_insn != after_bb_end)
    {
      av = NULL;

      /* This is needed only to obtain av_sets that are identical to
         those computed by the old compute_av_set version.  */
      if (last_insn == first_insn && !INSN_NOP_P (last_insn))
        av_set_add (&av, INSN_EXPR (last_insn));
    }
  else
    /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END.  */
    av = compute_av_set_at_bb_end (bb_end, p, end_ws);

  /* Compute the av_set in AV starting from below the LAST_INSN up to
     the location above the FIRST_INSN.  */
  for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn);
       cur_insn = PREV_INSN (cur_insn))
    if (!INSN_NOP_P (cur_insn))
      {
        expr_t expr;

        moveup_set_expr (&av, cur_insn, false);

        /* If the expression for CUR_INSN is already in the set,
           replace it by the new one.  */
        expr = av_set_lookup (av, INSN_VINSN (cur_insn));
        if (expr != NULL)
          {
            clear_expr (expr);
            copy_expr (expr, INSN_EXPR (cur_insn));
          }
        else
          av_set_add (&av, INSN_EXPR (cur_insn));
      }

  /* Clear the stale bb_av_set.  */
  if (sel_bb_head_p (first_insn))
    {
      av_set_clear (&BB_AV_SET (cur_bb));
      BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av;
      BB_AV_LEVEL (cur_bb) = global_level;
    }

  if (sched_verbose >= 6)
    {
      sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn));
      dump_av_set (av);
      sel_print ("\n");
    }

  ilist_remove (&p);
  return av;
}
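
/* The net effect is a memoized backward walk: the av_set flows up from the
   block end (or from a window-size cutoff) through every insn by way of
   moveup_set_expr, and the result is cached in BB_AV_SET at block heads so
   that further queries at the same global_level reuse it directly.  */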

/* Compute the av set before INSN.
   INSN - the current operation (actual rtx INSN)
   P - the current path, which is the list of insns visited so far
   WS - software lookahead window size.
   UNIQUE_P - TRUE if the returned av_set will be changed, hence
   if we want to save the computed av_set in s_i_d, we should make
   a copy of it.

   In the resulting set we will have only expressions that don't have delay
   stalls and nonsubstitutable dependences.  */
static av_set_t
compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
{
  return compute_av_set_inside_bb (insn, p, ws, unique_p);
}

/* Propagate a liveness set LV through INSN.  */
static void
propagate_lv_set (regset lv, insn_t insn)
{
  gcc_assert (INSN_P (insn));

  if (INSN_NOP_P (insn))
    return;

  df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv);
}

/* Return the liveness set at the end of BB.  */
static regset
compute_live_after_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;
  regset lv = get_clear_regset_from_pool ();

  gcc_assert (!ignore_first);

  FOR_EACH_EDGE (e, ei, bb->succs)
    if (sel_bb_empty_p (e->dest))
      {
        if (! BB_LV_SET_VALID_P (e->dest))
          {
            gcc_unreachable ();
            gcc_assert (BB_LV_SET (e->dest) == NULL);
            BB_LV_SET (e->dest) = compute_live_after_bb (e->dest);
            BB_LV_SET_VALID_P (e->dest) = true;
          }
        IOR_REG_SET (lv, BB_LV_SET (e->dest));
      }
    else
      IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest)));

  return lv;
}

/* Compute the set of all live registers at the point before INSN and save
   it at INSN if INSN is a bb header.  */
regset
compute_live (insn_t insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  insn_t final, temp;
  regset lv;

  /* Return the valid set if we're already on it.  */
  if (!ignore_first)
    {
      regset src = NULL;

      if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb))
        src = BB_LV_SET (bb);
      else
        {
          gcc_assert (in_current_region_p (bb));
          if (INSN_LIVE_VALID_P (insn))
            src = INSN_LIVE (insn);
        }

      if (src)
        {
          lv = get_regset_from_pool ();
          COPY_REG_SET (lv, src);

          if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb))
            {
              COPY_REG_SET (BB_LV_SET (bb), lv);
              BB_LV_SET_VALID_P (bb) = true;
            }

          return_regset_to_pool (lv);
          return lv;
        }
    }

  /* We've skipped the wrong lv_set.  Don't skip the right one.  */
  ignore_first = false;
  gcc_assert (in_current_region_p (bb));

  /* Find a valid LV set in this block or below, if needed.
     Start searching from the next insn: either ignore_first is true, or
     INSN doesn't have a correct live set.  */
  temp = NEXT_INSN (insn);
  final = NEXT_INSN (BB_END (bb));
  while (temp != final && ! INSN_LIVE_VALID_P (temp))
    temp = NEXT_INSN (temp);
  if (temp == final)
    {
      lv = compute_live_after_bb (bb);
      temp = PREV_INSN (temp);
    }
  else
    {
      lv = get_regset_from_pool ();
      COPY_REG_SET (lv, INSN_LIVE (temp));
    }

  /* Put correct lv sets on the insns which have bad sets.  */
  final = PREV_INSN (insn);
  while (temp != final)
    {
      propagate_lv_set (lv, temp);
      COPY_REG_SET (INSN_LIVE (temp), lv);
      INSN_LIVE_VALID_P (temp) = true;
      temp = PREV_INSN (temp);
    }

  /* Also put it in a BB.  */
  if (sel_bb_head_p (insn))
    {
      basic_block bb = BLOCK_FOR_INSN (insn);

      COPY_REG_SET (BB_LV_SET (bb), lv);
      BB_LV_SET_VALID_P (bb) = true;
    }

  /* We return LV to the pool, but will not clear it there.  Thus we can
     legitimately use LV until the next use of regset_pool_get ().  */
  return_regset_to_pool (lv);
  return lv;
}
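
/* Note the regset pooling idiom above: the computed set is returned to the
   pool right away but remains usable until the next pool allocation, so
   callers such as compute_live_below_insn below must consume it (e.g. with
   IOR_REG_SET) before requesting another regset.  */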

/* Update liveness sets for INSN.  */
static inline void
update_liveness_on_insn (rtx_insn *insn)
{
  ignore_first = true;
  compute_live (insn);
}

/* Compute liveness below INSN and write it into REGS.  */
static inline void
compute_live_below_insn (rtx_insn *insn, regset regs)
{
  rtx_insn *succ;
  succ_iterator si;

  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
    IOR_REG_SET (regs, compute_live (succ));
}

/* Update the data gathered in av and lv sets starting from INSN.  */
static void
update_data_sets (rtx_insn *insn)
{
  update_liveness_on_insn (insn);
  if (sel_bb_head_p (insn))
    {
      gcc_assert (AV_LEVEL (insn) != 0);
      BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1;
      compute_av_set (insn, NULL, 0, 0);
    }
}


/* Helper for move_op () and find_used_regs ().
   Return the speculation type for which a check should be created at the
   place of INSN.  EXPR is one of the original ops we are searching for.  */
static ds_t
get_spec_check_type_for_insn (insn_t insn, expr_t expr)
{
  ds_t to_check_ds;
  ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn));

  to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr);

  if (targetm.sched.get_insn_checked_ds)
    already_checked_ds |= targetm.sched.get_insn_checked_ds (insn);

  if (spec_info != NULL
      && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL))
    already_checked_ds |= BEGIN_CONTROL;

  already_checked_ds = ds_get_speculation_types (already_checked_ds);

  to_check_ds &= ~already_checked_ds;

  return to_check_ds;
}

/* Find the set of registers that are unavailable for storing exprs
   while moving ORIG_OPS up on the path starting from INSN due to
   liveness (USED_REGS) or hardware restrictions (REG_RENAME_P).

   All the original operations found during the traversal are saved in the
   ORIGINAL_INSNS list.

   REG_RENAME_P denotes the set of hardware registers that
   cannot be used with renaming due to the register class restrictions,
   mode restrictions and other constraints (the register we'll choose
   should be of a class compatible with the original uses, shouldn't be
   in call_used_regs, should be HARD_REGNO_RENAME_OK, etc.).

   Returns TRUE if we've found all original insns, FALSE otherwise.

   This function utilizes code_motion_path_driver (formerly find_used_regs_1)
   to traverse the code motion paths.  This helper function finds registers
   that are not available for storing exprs while moving ORIG_OPS up on the
   path starting from INSN.  A register is considered used on the moving path
   if one of the following conditions is not satisfied:

      (1) the register is not set or read on any path from INSN to an
          instance of the original operation,
      (2) the register is not among the live registers of the point
          immediately following the first original operation on a given
          downward path, except for the original target register of the
          operation,
      (3) the register is not live on the other path of any conditional
          branch that is passed by the operation, in case original
          operations are not present on both paths of the conditional
          branch.

   REG_RENAME_P->CROSSED_CALL_ABIS is true if there is a call insn on the
   path from INSN to the original insn.  In this case CALL_USED_REG_SET will
   be added to unavailable hard regs at the point an original operation is
   found.  */

static bool
find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
                struct reg_rename *reg_rename_p, def_list_t *original_insns)
{
  def_list_iterator i;
  def_t def;
  int res;
  bool needs_spec_check_p = false;
  expr_t expr;
  av_set_iterator expr_iter;
  struct fur_static_params sparams;
  struct cmpd_local_params lparams;

  /* We haven't visited any blocks yet.  */
  bitmap_clear (code_motion_visited_blocks);

  /* Init parameters for code_motion_path_driver.  */
  sparams.crossed_call_abis = 0;
  sparams.original_insns = original_insns;
  sparams.used_regs = used_regs;

  /* Set the appropriate hooks and data.  */
  code_motion_path_driver_info = &fur_hooks;

  res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);

  reg_rename_p->crossed_call_abis |= sparams.crossed_call_abis;

  gcc_assert (res == 1);
  gcc_assert (original_insns && *original_insns);

  /* ??? We calculate whether an expression needs a check when computing
     av sets.  This information is not as precise as it could be due to
     merging this bit in merge_expr.  We can do better in find_used_regs,
     but we want to avoid multiple traversals of the same code motion
     paths.  */
  FOR_EACH_EXPR (expr, expr_iter, orig_ops)
    needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr);

  /* Mark hardware regs in REG_RENAME_P that are not suitable
     for renaming expr in INSN due to hardware restrictions (register class,
     modes compatibility etc).  */
  FOR_EACH_DEF (def, i, *original_insns)
    {
      vinsn_t vinsn = INSN_VINSN (def->orig_insn);

      if (VINSN_SEPARABLE_P (vinsn))
        mark_unavailable_hard_regs (def, reg_rename_p, used_regs);

      /* Do not allow clobbering of ld.[sa] address in case some of the
         original operations need a check.  */
      if (needs_spec_check_p)
        IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn));
    }

  return true;
}
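
/* Conditions (1)-(3) above express when a register is free for renaming:
   with fur_hooks installed, code_motion_path_driver accumulates into
   USED_REGS the registers set or used along all traversed paths, so the
   renaming code later only needs to avoid USED_REGS plus the
   hardware-restricted registers recorded in REG_RENAME_P.  */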


/* Functions to choose the best insn from available ones.  */

/* Adjusts the priority for EXPR using the backend *_adjust_priority hook.  */
static int
sel_target_adjust_priority (expr_t expr)
{
  int priority = EXPR_PRIORITY (expr);
  int new_priority;

  if (targetm.sched.adjust_priority)
    new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr),
                                                  priority);
  else
    new_priority = priority;

  /* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly.  */
  EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr);

  if (sched_verbose >= 4)
    sel_print ("sel_target_adjust_priority: insn %d, %d+%d = %d.\n",
               INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
               EXPR_PRIORITY_ADJ (expr), new_priority);

  return new_priority;
}

/* Rank two available exprs for scheduling.  Never return 0 here.  */
static int
sel_rank_for_schedule (const void *x, const void *y)
{
  expr_t tmp = *(const expr_t *) y;
  expr_t tmp2 = *(const expr_t *) x;
  insn_t tmp_insn, tmp2_insn;
  vinsn_t tmp_vinsn, tmp2_vinsn;
  int val;

  tmp_vinsn = EXPR_VINSN (tmp);
  tmp2_vinsn = EXPR_VINSN (tmp2);
  tmp_insn = EXPR_INSN_RTX (tmp);
  tmp2_insn = EXPR_INSN_RTX (tmp2);

  /* Schedule debug insns as early as possible.  */
  if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn))
    return -1;
  else if (DEBUG_INSN_P (tmp2_insn))
    return 1;

  /* Prefer SCHED_GROUP_P insns to any others.  */
  if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn))
    {
      if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
        return SCHED_GROUP_P (tmp2_insn) ? 1 : -1;

      /* Now uniqueness means SCHED_GROUP_P is set, because schedule groups
         cannot be cloned.  */
      if (VINSN_UNIQUE_P (tmp2_vinsn))
        return 1;
      return -1;
    }

  /* Discourage scheduling of speculative checks.  */
  val = (sel_insn_is_speculation_check (tmp_insn)
         - sel_insn_is_speculation_check (tmp2_insn));
  if (val)
    return val;

  /* Prefer a not-yet-scheduled insn over a scheduled one.  */
  if (EXPR_SCHED_TIMES (tmp) > 0 || EXPR_SCHED_TIMES (tmp2) > 0)
    {
      val = EXPR_SCHED_TIMES (tmp) - EXPR_SCHED_TIMES (tmp2);
      if (val)
        return val;
    }

  /* Prefer a jump over a non-jump instruction.  */
  if (control_flow_insn_p (tmp_insn) && !control_flow_insn_p (tmp2_insn))
    return -1;
  else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn))
    return 1;

  /* Prefer an expr with non-zero usefulness.  */
  int u1 = EXPR_USEFULNESS (tmp), u2 = EXPR_USEFULNESS (tmp2);

  if (u1 == 0)
    {
      if (u2 == 0)
        u1 = u2 = 1;
      else
        return 1;
    }
  else if (u2 == 0)
    return -1;

  /* Prefer an expr with greater priority.  */
  val = (u2 * (EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2))
         - u1 * (EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp)));
  if (val)
    return val;

  if (spec_info != NULL && spec_info->mask != 0)
    /* This code was taken from haifa-sched.cc: rank_for_schedule ().  */
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = EXPR_SPEC_DONE_DS (tmp);
      if (ds1)
        dw1 = ds_weak (ds1);
      else
        dw1 = NO_DEP_WEAK;

      ds2 = EXPR_SPEC_DONE_DS (tmp2);
      if (ds2)
        dw2 = ds_weak (ds2);
      else
        dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
        return dw;
    }

  /* Prefer an old insn to a bookkeeping insn.  */
  if (INSN_UID (tmp_insn) < first_emitted_uid
      && INSN_UID (tmp2_insn) >= first_emitted_uid)
    return -1;
  if (INSN_UID (tmp_insn) >= first_emitted_uid
      && INSN_UID (tmp2_insn) < first_emitted_uid)
    return 1;

  /* Prefer an insn with a smaller UID, as a last resort.
     We can't safely use INSN_LUID as it is defined only for those insns
     that are in the stream.  */
  return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn);
}
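
/* The tie-breakers above therefore apply in a fixed order: debug insns
   first, then SCHED_GROUP_P insns, speculation checks pushed back, fewer
   previous schedulings, jumps before non-jumps, usefulness-weighted
   priority, weaker speculation status, original insns before bookkeeping
   copies, and finally the raw insn UID, which guarantees a non-zero result
   as the comment at the top requires.  */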

/* Filter out expressions from the av set pointed to by AV_PTR
   that are pipelined too many times.  */
static void
process_pipelined_exprs (av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator si;

  /* Don't pipeline already pipelined code as that would increase
     the number of unnecessary register moves.  */
  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      if (EXPR_SCHED_TIMES (expr)
          >= param_selsched_max_sched_times)
        av_set_iter_remove (&si);
    }
}

/* Filter speculative insns from AV_PTR if we don't want them.  */
static void
process_spec_exprs (av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator si;

  if (spec_info == NULL)
    return;

  /* Scan *AV_PTR to find out if we want to consider speculative
     instructions for scheduling.  */
  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      ds_t ds;

      ds = EXPR_SPEC_DONE_DS (expr);

      /* The probability of success is too low - don't speculate.  */
      if ((ds & SPECULATIVE)
          && (ds_weak (ds) < spec_info->data_weakness_cutoff
              || EXPR_USEFULNESS (expr) < spec_info->control_weakness_cutoff
              || (pipelining_p && false
                  && (ds & DATA_SPEC)
                  && (ds & CONTROL_SPEC))))
        {
          av_set_iter_remove (&si);
          continue;
        }
    }
}

/* Search for any use-like insns in AV_PTR and decide on scheduling
   them.  Return one when found, and NULL otherwise.
   Note that we check here whether a USE could be scheduled to avoid
   an infinite loop later.  */
static expr_t
process_use_exprs (av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator si;
  bool uses_present_p = false;
  bool try_uses_p = true;

  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      /* This will also initialize INSN_CODE for later use.  */
      if (recog_memoized (EXPR_INSN_RTX (expr)) < 0)
        {
          /* If we have a USE in *AV_PTR that was not scheduled yet,
             schedule it now, as it can only do good.  */
          if (EXPR_SCHED_TIMES (expr) <= 0)
            {
              if (EXPR_TARGET_AVAILABLE (expr) == 1)
                return expr;

              av_set_iter_remove (&si);
            }
          else
            {
              gcc_assert (pipelining_p);

              uses_present_p = true;
            }
        }
      else
        try_uses_p = false;
    }

  if (uses_present_p)
    {
      /* If we don't want to schedule any USEs right now and we have some
         in *AV_PTR, remove them, else just return the first one found.  */
      if (!try_uses_p)
        {
          FOR_EACH_EXPR_1 (expr, si, av_ptr)
            if (INSN_CODE (EXPR_INSN_RTX (expr)) < 0)
              av_set_iter_remove (&si);
        }
      else
        {
          FOR_EACH_EXPR_1 (expr, si, av_ptr)
            {
              gcc_assert (INSN_CODE (EXPR_INSN_RTX (expr)) < 0);

              if (EXPR_TARGET_AVAILABLE (expr) == 1)
                return expr;

              av_set_iter_remove (&si);
            }
        }
    }

  return NULL;
}

/* Lookup EXPR in VINSN_VEC and return TRUE if found.  Also check patterns
   from EXPR's history of changes.  */
static bool
vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
{
  vinsn_t vinsn, expr_vinsn;
  int n;
  unsigned i;

  /* Start with checking expr itself and then proceed with all the old forms
     of expr taken from its history vector.  */
  for (i = 0, expr_vinsn = EXPR_VINSN (expr);
       expr_vinsn;
       expr_vinsn = (i < EXPR_HISTORY_OF_CHANGES (expr).length ()
                     ? EXPR_HISTORY_OF_CHANGES (expr)[i++].old_expr_vinsn
                     : NULL))
    FOR_EACH_VEC_ELT (vinsn_vec, n, vinsn)
      if (VINSN_SEPARABLE_P (vinsn))
        {
          if (vinsn_equal_p (vinsn, expr_vinsn))
            return true;
        }
      else
        {
          /* For non-separable instructions, the blocking insn can have
             another pattern due to substitution, and we can't choose a
             different register as in the above case.  Check all registers
             being written instead.  */
          if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
                                  VINSN_REG_SETS (expr_vinsn)))
            return true;
        }

  return false;
}
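
/* The walk above first checks EXPR's current vinsn and then every
   old_expr_vinsn recorded in its history vector, so an expression is still
   recognized as blocked even after substitution or speculation has already
   rewritten its pattern.  */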

/* Return true if either of the expressions from ORIG_OPS can be blocked
   by previously created bookkeeping code.  STATIC_PARAMS points to the
   static parameters of move_op.  */
static bool
av_set_could_be_blocked_by_bookkeeping_p (av_set_t orig_ops,
                                          void *static_params)
{
  expr_t expr;
  av_set_iterator iter;
  moveop_static_params_p sparams;

  /* This checks that expressions in ORIG_OPS are not blocked by bookkeeping
     created while scheduling on another fence.  */
  FOR_EACH_EXPR (expr, iter, orig_ops)
    if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
      return true;

  gcc_assert (code_motion_path_driver_info == &move_op_hooks);
  sparams = (moveop_static_params_p) static_params;

  /* Expressions can also be blocked by bookkeeping created during the
     current move_op.  */
  if (bitmap_bit_p (current_copies, INSN_UID (sparams->failed_insn)))
    FOR_EACH_EXPR (expr, iter, orig_ops)
      if (moveup_expr_cached (expr, sparams->failed_insn, false)
          != MOVEUP_EXPR_NULL)
        return true;

  /* Expressions in ORIG_OPS may have a wrong destination register due to
     renaming.  Check with the right register instead.  */
  if (sparams->dest && REG_P (sparams->dest))
    {
      rtx reg = sparams->dest;
      vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn);

      if (register_unavailable_p (VINSN_REG_SETS (failed_vinsn), reg)
          || register_unavailable_p (VINSN_REG_USES (failed_vinsn), reg)
          || register_unavailable_p (VINSN_REG_CLOBBERS (failed_vinsn), reg))
        return true;
    }

  return false;
}
3634 | |
/* Clear VINSN_VEC and detach vinsns.  */
static void
vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
{
  unsigned len = vinsn_vec->length ();
  if (len > 0)
    {
      vinsn_t vinsn;
      int n;

      FOR_EACH_VEC_ELT (*vinsn_vec, n, vinsn)
        vinsn_detach (vinsn);
      vinsn_vec->block_remove (0, len);
    }
}

/* Add the vinsn of EXPR to the VINSN_VEC.  */
static void
vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
{
  vinsn_attach (EXPR_VINSN (expr));
  vinsn_vec->safe_push (EXPR_VINSN (expr));
}

/* Free the vector representing blocked expressions.  */
static void
vinsn_vec_free (vinsn_vec_t &vinsn_vec)
{
  vinsn_vec.release ();
}

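/* NB: vinsn_vec_add takes a reference on the vinsn via vinsn_attach, and
   vinsn_vec_clear drops it via vinsn_detach, so a recorded vinsn stays live
   even after its expression has left the av set.  */
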
/* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT.  */

void sel_add_to_insn_priority (rtx insn, int amount)
{
  EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount;

  if (sched_verbose >= 2)
    sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
               INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)),
               EXPR_PRIORITY_ADJ (INSN_EXPR (insn)));
}

/* Turn AV into a vector, filter inappropriate insns and sort it.  Return
   true if there is something to schedule.  BNDS and FENCE are current
   boundaries and fence, respectively.  If we need to stall for some cycles
   before an expr from AV would become available, write this number to
   *PNEED_STALL.  */
static bool
fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
                 int *pneed_stall)
{
  av_set_iterator si;
  expr_t expr;
  int sched_next_worked = 0, stalled, n;
  static int av_max_prio, est_ticks_till_branch;
  int min_need_stall = -1;
  deps_t dc = BND_DC (BLIST_BND (bnds));

  /* Bail out early when the ready list contained only USEs/CLOBBERs that are
     already scheduled.  */
  if (av == NULL)
    return false;

  /* Empty the vector left over from the previous scheduling step.  */
  if (vec_av_set.length () > 0)
    vec_av_set.block_remove (0, vec_av_set.length ());

  /* Turn the set into a vector for sorting and call sel_target_adjust_priority
     for each insn.  */
  gcc_assert (vec_av_set.is_empty ());
  FOR_EACH_EXPR (expr, si, av)
    {
      vec_av_set.safe_push (expr);

      gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);

      /* Adjust priority using target backend hook.  */
      sel_target_adjust_priority (expr);
    }

  /* Sort the vector.  */
  vec_av_set.qsort (sel_rank_for_schedule);

  /* We record maximal priority of insns in av set for current instruction
     group.  */
  if (FENCE_STARTS_CYCLE_P (fence))
    av_max_prio = est_ticks_till_branch = INT_MIN;
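  /* AV_MAX_PRIO and EST_TICKS_TILL_BRANCH are function-local statics: they
     keep their values across successive calls for the same instruction
     group and are reset only when a new cycle starts at the fence.  */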

  /* Filter out inappropriate expressions.  Loop's direction is reversed to
     visit "best" instructions first.  We assume that vec::unordered_remove
     moves last element in place of one being deleted.  */
  for (n = vec_av_set.length () - 1, stalled = 0; n >= 0; n--)
    {
      expr_t expr = vec_av_set[n];
      insn_t insn = EXPR_INSN_RTX (expr);
      signed char target_available;
      bool is_orig_reg_p = true;
      int need_cycles, new_prio;
      bool fence_insn_p = INSN_UID (insn) == INSN_UID (FENCE_INSN (fence));

      /* Don't allow any insns other than from SCHED_GROUP if we have one.  */
      if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence))
        {
          vec_av_set.unordered_remove (n);
          continue;
        }

      /* Set number of sched_next insns (just in case there
         could be several).  */
      if (FENCE_SCHED_NEXT (fence))
        sched_next_worked++;

      /* Check all liveness requirements and try renaming.
         FIXME: try to minimize calls to this.  */
      target_available = EXPR_TARGET_AVAILABLE (expr);

      /* If insn was already scheduled on the current fence,
         set TARGET_AVAILABLE to -1 no matter what expr's attribute says.  */
      if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr)
          && !fence_insn_p)
        target_available = -1;

      /* If the availability of the EXPR is invalidated by the insertion of
         bookkeeping earlier, make sure that we won't choose this expr for
         scheduling if it's not separable, and if it is separable, then
         we have to recompute the set of available registers for it.  */
      if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
        {
          vec_av_set.unordered_remove (n);
          if (sched_verbose >= 4)
            sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n",
                       INSN_UID (insn));
          continue;
        }

      if (target_available == true)
        {
          /* Do nothing -- we can use an existing register.  */
          is_orig_reg_p = EXPR_SEPARABLE_P (expr);
        }
      else if (/* Non-separable instruction will never
                  get another register.  */
               (target_available == false
                && !EXPR_SEPARABLE_P (expr))
               /* Don't try to find a register for low-priority expression.  */
               || (int) vec_av_set.length () - 1 - n >= max_insns_to_rename
               /* ??? FIXME: Don't try to rename data speculation.  */
               || (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA)
               || ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p))
        {
          vec_av_set.unordered_remove (n);
          if (sched_verbose >= 4)
            sel_print ("Expr %d has no suitable target register\n",
                       INSN_UID (insn));

          /* A fence insn should not get here.  */
          gcc_assert (!fence_insn_p);
          continue;
        }

      /* At this point a fence insn should always be available.  */
      gcc_assert (!fence_insn_p
                  || INSN_UID (FENCE_INSN (fence)) == INSN_UID (EXPR_INSN_RTX (expr)));

      /* Filter expressions that need to be renamed or speculated when
         pipelining, because compensating register copies or speculation
         checks are likely to be placed near the beginning of the loop,
         causing a stall.  */
      if (pipelining_p && EXPR_ORIG_SCHED_CYCLE (expr) > 0
          && (!is_orig_reg_p || EXPR_SPEC_DONE_DS (expr) != 0))
        {
          /* Estimation of number of cycles until loop branch for
             renaming/speculation to be successful.  */
          int need_n_ticks_till_branch = sel_vinsn_cost (EXPR_VINSN (expr));

          if ((int) current_loop_nest->ninsns < 9)
            {
              vec_av_set.unordered_remove (n);
              if (sched_verbose >= 4)
                sel_print ("Pipelining expr %d will likely cause stall\n",
                           INSN_UID (insn));
              continue;
            }

          if ((int) current_loop_nest->ninsns - num_insns_scheduled
              < need_n_ticks_till_branch * issue_rate / 2
              && est_ticks_till_branch < need_n_ticks_till_branch)
            {
              vec_av_set.unordered_remove (n);
              if (sched_verbose >= 4)
                sel_print ("Pipelining expr %d will likely cause stall\n",
                           INSN_UID (insn));
              continue;
            }
        }

      /* We want to schedule speculation checks as late as possible.  Discard
         them from av set if there are instructions with higher priority.  */
      if (sel_insn_is_speculation_check (insn)
          && EXPR_PRIORITY (expr) < av_max_prio)
        {
          stalled++;
          min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1);
          vec_av_set.unordered_remove (n);
          if (sched_verbose >= 4)
            sel_print ("Delaying speculation check %d until its first use\n",
                       INSN_UID (insn));
          continue;
        }

      /* Ignore EXPRs available from pipelining to update AV_MAX_PRIO.  */
      if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
        av_max_prio = MAX (av_max_prio, EXPR_PRIORITY (expr));

      /* Don't allow any insns whose data is not yet ready.
         Check first whether we've already tried them and failed.  */
      if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
        {
          need_cycles = (FENCE_READY_TICKS (fence)[INSN_UID (insn)]
                         - FENCE_CYCLE (fence));
          if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
            est_ticks_till_branch = MAX (est_ticks_till_branch,
                                         EXPR_PRIORITY (expr) + need_cycles);

          if (need_cycles > 0)
            {
              stalled++;
              min_need_stall = (min_need_stall < 0
                                ? need_cycles
                                : MIN (min_need_stall, need_cycles));
              vec_av_set.unordered_remove (n);

              if (sched_verbose >= 4)
                sel_print ("Expr %d is not ready until cycle %d (cached)\n",
                           INSN_UID (insn),
                           FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
              continue;
            }
        }

      /* Now resort to dependence analysis to find whether EXPR might be
         stalled due to dependencies from FENCE's context.  */
      need_cycles = tick_check_p (expr, dc, fence);
      new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles;

      if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
        est_ticks_till_branch = MAX (est_ticks_till_branch,
                                     new_prio);

      if (need_cycles > 0)
        {
          if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence))
            {
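              /* Grow the cached ready-ticks array to 1.5x the current UID
                 so that insns with nearby UIDs do not force an immediate
                 reallocation.  */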
              int new_size = INSN_UID (insn) * 3 / 2;

              FENCE_READY_TICKS (fence)
                = (int *) xrecalloc (FENCE_READY_TICKS (fence),
                                     new_size, FENCE_READY_TICKS_SIZE (fence),
                                     sizeof (int));
            }
          FENCE_READY_TICKS (fence)[INSN_UID (insn)]
            = FENCE_CYCLE (fence) + need_cycles;

          stalled++;
          min_need_stall = (min_need_stall < 0
                            ? need_cycles
                            : MIN (min_need_stall, need_cycles));

          vec_av_set.unordered_remove (n);

          if (sched_verbose >= 4)
            sel_print ("Expr %d is not ready yet until cycle %d\n",
                       INSN_UID (insn),
                       FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
          continue;
        }

      if (sched_verbose >= 4)
        sel_print ("Expr %d is ok\n", INSN_UID (insn));
      min_need_stall = 0;
    }

  /* Clear SCHED_NEXT.  */
  if (FENCE_SCHED_NEXT (fence))
    {
      gcc_assert (sched_next_worked == 1);
      FENCE_SCHED_NEXT (fence) = NULL;
    }

  /* No need to stall if this variable was not initialized.  */
  if (min_need_stall < 0)
    min_need_stall = 0;

  if (vec_av_set.is_empty ())
    {
      /* We need to set *pneed_stall here, because later we skip this code
         when ready list is empty.  */
      *pneed_stall = min_need_stall;
      return false;
    }
  else
    gcc_assert (min_need_stall == 0);

  /* Sort the vector.  */
  vec_av_set.qsort (sel_rank_for_schedule);

  if (sched_verbose >= 4)
    {
      sel_print ("Total ready exprs: %d, stalled: %d\n",
                 vec_av_set.length (), stalled);
      sel_print ("Sorted av set (%d): ", vec_av_set.length ());
      FOR_EACH_VEC_ELT (vec_av_set, n, expr)
        dump_expr (expr);
      sel_print ("\n");
    }

  *pneed_stall = 0;
  return true;
}

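/* NB: fill_vec_av_set sorts the vector twice: once before filtering, so that
   the reversed loop visits the best expressions first, and once after it,
   because vec::unordered_remove perturbs the order of the surviving
   elements.  */
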
/* Convert a vectored and sorted av set to the ready list that
   the rest of the backend wants to see.  */
static void
convert_vec_av_set_to_ready (void)
{
  int n;
  expr_t expr;

  /* Allocate and fill the ready list from the sorted vector.  */
  ready.n_ready = vec_av_set.length ();
  ready.first = ready.n_ready - 1;

  gcc_assert (ready.n_ready > 0);

  if (ready.n_ready > max_issue_size)
    {
      max_issue_size = ready.n_ready;
      sched_extend_ready_list (ready.n_ready);
    }

  FOR_EACH_VEC_ELT (vec_av_set, n, expr)
    {
      vinsn_t vi = EXPR_VINSN (expr);
      insn_t insn = VINSN_INSN_RTX (vi);

      ready_try[n] = 0;
      ready.vec[n] = insn;
    }
}

/* Initialize ready list from *AV_PTR for the max_issue () call.
   If any unrecognizable insn is found in *AV_PTR, return it (and skip
   max_issue).  BND and FENCE are current boundary and fence,
   respectively.  If we need to stall for some cycles before an expr
   from *AV_PTR would become available, write this number to *PNEED_STALL.  */
static expr_t
fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
                 int *pneed_stall)
{
  expr_t expr;

  /* We do not support multiple boundaries per fence.  */
  gcc_assert (BLIST_NEXT (bnds) == NULL);

  /* Process expressions requiring special handling, i.e. pipelined,
     speculative and recog() < 0 expressions first.  */
  process_pipelined_exprs (av_ptr);
  process_spec_exprs (av_ptr);

  /* A USE could be scheduled immediately.  */
  expr = process_use_exprs (av_ptr);
  if (expr)
    {
      *pneed_stall = 0;
      return expr;
    }

  /* Turn the av set to a vector for sorting.  */
  if (! fill_vec_av_set (*av_ptr, bnds, fence, pneed_stall))
    {
      ready.n_ready = 0;
      return NULL;
    }

  /* Build the final ready list.  */
  convert_vec_av_set_to_ready ();
  return NULL;
}

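/* NB: fill_ready_list has three possible outcomes: a non-NULL expr (a USE to
   schedule immediately, bypassing max_issue), NULL with ready.n_ready == 0
   (nothing schedulable yet; *PNEED_STALL says how long to wait), or NULL
   with a populated ready list for the normal max_issue path.  */
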
/* Wrapper for dfa_new_cycle ().  Returns TRUE if cycle was advanced.  */
static bool
sel_dfa_new_cycle (insn_t insn, fence_t fence)
{
  int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
                             ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
                             : FENCE_CYCLE (fence) - 1;
  bool res = false;
  int sort_p = 0;

  if (!targetm.sched.dfa_new_cycle)
    return false;

  memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);

  while (!sort_p && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
                                                 insn, last_scheduled_cycle,
                                                 FENCE_CYCLE (fence), &sort_p))
    {
      memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
      advance_one_cycle (fence);
      memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
      res = true;
    }

  return res;
}

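/* NB: the loop in sel_dfa_new_cycle keeps advancing whole cycles for as long
   as the target's dfa_new_cycle hook requests it, copying the DFA state back
   and forth so that FENCE_STATE and curr_state stay in sync around each
   advance_one_cycle call.  */
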
/* Invoke reorder* target hooks on the ready list.  Return the number of insns
   we can issue.  FENCE is the current fence.  */
static int
invoke_reorder_hooks (fence_t fence)
{
  int issue_more;
  bool ran_hook = false;

  /* Call the reorder hook at the beginning of the cycle, and call
     the reorder2 hook in the middle of the cycle.  */
  if (FENCE_ISSUED_INSNS (fence) == 0)
    {
      if (targetm.sched.reorder
          && !SCHED_GROUP_P (ready_element (&ready, 0))
          && ready.n_ready > 1)
        {
          /* Don't give reorder the most prioritized insn as it can break
             pipelining.  */
          if (pipelining_p)
            --ready.n_ready;

          issue_more
            = targetm.sched.reorder (sched_dump, sched_verbose,
                                     ready_lastpos (&ready),
                                     &ready.n_ready, FENCE_CYCLE (fence));

          if (pipelining_p)
            ++ready.n_ready;

          ran_hook = true;
        }
      else
        /* Initialize can_issue_more for variable_issue.  */
        issue_more = issue_rate;
    }
  else if (targetm.sched.reorder2
           && !SCHED_GROUP_P (ready_element (&ready, 0)))
    {
      if (ready.n_ready == 1)
        issue_more =
          targetm.sched.reorder2 (sched_dump, sched_verbose,
                                  ready_lastpos (&ready),
                                  &ready.n_ready, FENCE_CYCLE (fence));
      else
        {
          if (pipelining_p)
            --ready.n_ready;

          issue_more =
            targetm.sched.reorder2 (sched_dump, sched_verbose,
                                    ready.n_ready
                                    ? ready_lastpos (&ready) : NULL,
                                    &ready.n_ready, FENCE_CYCLE (fence));

          if (pipelining_p)
            ++ready.n_ready;
        }

      ran_hook = true;
    }
  else
    issue_more = FENCE_ISSUE_MORE (fence);

  /* Ensure that ready list and vec_av_set are in line with each other,
     i.e. vec_av_set[i] == ready_element (&ready, i).  */
  if (issue_more && ran_hook)
    {
      int i, j, n;
      rtx_insn **arr = ready.vec;
      expr_t *vec = vec_av_set.address ();

      for (i = 0, n = ready.n_ready; i < n; i++)
        if (EXPR_INSN_RTX (vec[i]) != arr[i])
          {
            for (j = i; j < n; j++)
              if (EXPR_INSN_RTX (vec[j]) == arr[i])
                break;
            gcc_assert (j < n);

            std::swap (vec[i], vec[j]);
          }
    }

  return issue_more;
}

/* Return an EXPR corresponding to INDEX element of ready list, if
   FOLLOW_READY_ELEMENT is true (i.e., an expr of
   ready_element (&ready, INDEX) will be returned), and to INDEX element of
   ready.vec otherwise.  */
static inline expr_t
find_expr_for_ready (int index, bool follow_ready_element)
{
  expr_t expr;
  int real_index;

  real_index = follow_ready_element ? ready.first - index : index;

  expr = vec_av_set[real_index];
  gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr));

  return expr;
}

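/* For example, with ready.first == 4 (five entries), index 0 with
   FOLLOW_READY_ELEMENT set maps to vec_av_set[4]: ready_element counts from
   the most prioritized end of the list, which sel_rank_for_schedule leaves
   at the highest indices, whereas ready.vec is indexed directly.  */
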
/* Calculate insns worth trying via lookahead_guard hook.  Return the number
   of such insns found.  */
static int
invoke_dfa_lookahead_guard (void)
{
  int i, n;
  bool have_hook
    = targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL;

  if (sched_verbose >= 2)
    sel_print ("ready after reorder: ");

  for (i = 0, n = 0; i < ready.n_ready; i++)
    {
      expr_t expr;
      insn_t insn;
      int r;

      /* In this loop insn is Ith element of the ready list given by
         ready_element, not Ith element of ready.vec.  */
      insn = ready_element (&ready, i);

      if (! have_hook || i == 0)
        r = 0;
      else
        r = targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn, i);

      gcc_assert (INSN_CODE (insn) >= 0);

      /* Only insns with ready_try = 0 can get here
         from fill_ready_list.  */
      gcc_assert (ready_try [i] == 0);
      ready_try[i] = r;
      if (!r)
        n++;

      expr = find_expr_for_ready (i, true);

      if (sched_verbose >= 2)
        {
          dump_vinsn (EXPR_VINSN (expr));
          sel_print (":%d; ", ready_try[i]);
        }
    }

  if (sched_verbose >= 2)
    sel_print ("\n");
  return n;
}

/* Calculate the number of privileged insns and return it.  */
static int
calculate_privileged_insns (void)
{
  expr_t cur_expr, min_spec_expr = NULL;
  int privileged_n = 0, i;

  for (i = 0; i < ready.n_ready; i++)
    {
      if (ready_try[i])
        continue;

      if (! min_spec_expr)
        min_spec_expr = find_expr_for_ready (i, true);

      cur_expr = find_expr_for_ready (i, true);

      if (EXPR_SPEC (cur_expr) > EXPR_SPEC (min_spec_expr))
        break;

      ++privileged_n;
    }

  if (i == ready.n_ready)
    privileged_n = 0;

  if (sched_verbose >= 2)
    sel_print ("privileged_n: %d insns with SPEC %d\n",
               privileged_n, privileged_n ? EXPR_SPEC (min_spec_expr) : -1);
  return privileged_n;
}

/* Call the rest of the hooks after the choice was made.  Return
   the number of insns that still can be issued given that the current
   number is ISSUE_MORE.  FENCE and BEST_INSN are the current fence
   and the insn chosen for scheduling, respectively.  */
static int
invoke_aftermath_hooks (fence_t fence, rtx_insn *best_insn, int issue_more)
{
  gcc_assert (INSN_P (best_insn));

  /* First, call dfa_new_cycle, and then variable_issue, if available.  */
  sel_dfa_new_cycle (best_insn, fence);

  if (targetm.sched.variable_issue)
    {
      memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
      issue_more =
        targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn,
                                      issue_more);
      memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
    }
  else if (!DEBUG_INSN_P (best_insn)
           && GET_CODE (PATTERN (best_insn)) != USE
           && GET_CODE (PATTERN (best_insn)) != CLOBBER)
    issue_more--;

  return issue_more;
}

/* Estimate the cost of issuing INSN on DFA state STATE.  */
static int
estimate_insn_cost (rtx_insn *insn, state_t state)
{
  static state_t temp = NULL;
  int cost;

  if (!temp)
    temp = xmalloc (dfa_state_size);

  memcpy (temp, state, dfa_state_size);
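  /* state_transition returns a negative value when INSN fits into the
     current cycle; a result of zero means a new cycle must start first
     (counted as one cycle below), and a positive value is the number of
     cycles to wait.  */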
  cost = state_transition (temp, insn);

  if (cost < 0)
    return 0;
  else if (cost == 0)
    return 1;
  return cost;
}

/* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
   This function properly handles ASMs, USEs etc.  */
static int
get_expr_cost (expr_t expr, fence_t fence)
{
  rtx_insn *insn = EXPR_INSN_RTX (expr);

  if (recog_memoized (insn) < 0)
    {
      if (!FENCE_STARTS_CYCLE_P (fence)
          && INSN_ASM_P (insn))
        /* This is an asm insn that we tried to issue on a cycle other
           than the first.  Issue it on the next cycle.  */
        return 1;
      else
        /* A USE insn, or something else we don't need to
           understand.  We can't pass these directly to
           state_transition because it will trigger a
           fatal error for unrecognizable insns.  */
        return 0;
    }
  else
    return estimate_insn_cost (insn, FENCE_STATE (fence));
}

/* Find the best insn for scheduling, either via max_issue or just take
   the most prioritized available.  */
static int
choose_best_insn (fence_t fence, int privileged_n, int *index)
{
  int can_issue = 0;

  if (dfa_lookahead > 0)
    {
      cycle_issued_insns = FENCE_ISSUED_INSNS (fence);
      /* TODO: pass equivalent of first_cycle_insn_p to max_issue ().  */
      can_issue = max_issue (&ready, privileged_n,
                             FENCE_STATE (fence), true, index);
      if (sched_verbose >= 2)
        sel_print ("max_issue: we can issue %d insns, already did %d insns\n",
                   can_issue, FENCE_ISSUED_INSNS (fence));
    }
  else
    {
      /* We can't use max_issue; just return the first available element.  */
      int i;

      for (i = 0; i < ready.n_ready; i++)
        {
          expr_t expr = find_expr_for_ready (i, true);

          if (get_expr_cost (expr, fence) < 1)
            {
              can_issue = can_issue_more;
              *index = i;

              if (sched_verbose >= 2)
                sel_print ("using %dth insn from the ready list\n", i + 1);

              break;
            }
        }

      if (i == ready.n_ready)
        {
          can_issue = 0;
          *index = -1;
        }
    }

  return can_issue;
}

/* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
   BNDS and FENCE are current boundaries and scheduling fence respectively.
   Return the expr found, or NULL if nothing can be issued at the moment.
   Write to PNEED_STALL the number of cycles to stall if no expr was found.  */
static expr_t
find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
                int *pneed_stall)
{
  expr_t best;

  /* Choose the best insn for scheduling via:
     1) sorting the ready list based on priority;
     2) calling the reorder hook;
     3) calling max_issue.  */
  best = fill_ready_list (av_vliw_ptr, bnds, fence, pneed_stall);
  if (best == NULL && ready.n_ready > 0)
    {
      int privileged_n, index;

      can_issue_more = invoke_reorder_hooks (fence);
      if (can_issue_more > 0)
        {
          /* Try choosing the best insn until we find one that could be
             scheduled due to liveness restrictions on its destination register.
             In the future, we'd like to choose once and then just probe insns
             in the order of their priority.  */
          invoke_dfa_lookahead_guard ();
          privileged_n = calculate_privileged_insns ();
          can_issue_more = choose_best_insn (fence, privileged_n, &index);
          if (can_issue_more)
            best = find_expr_for_ready (index, true);
        }
      /* We had some available insns, so if we can't issue them,
         we have a stall.  */
      if (can_issue_more == 0)
        {
          best = NULL;
          *pneed_stall = 1;
        }
    }

  if (best != NULL)
    {
      can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best),
                                               can_issue_more);
      if (targetm.sched.variable_issue
          && can_issue_more == 0)
        *pneed_stall = 1;
    }

  if (sched_verbose >= 2)
    {
      if (best != NULL)
        {
          sel_print ("Best expression (vliw form): ");
          dump_expr (best);
          sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
        }
      else
        sel_print ("No best expr found!\n");
    }

  return best;
}


/* Functions that implement the core of the scheduler.  */


/* Emit an instruction from EXPR with SEQNO and VINSN after
   PLACE_TO_INSERT.  */
static insn_t
emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                           insn_t place_to_insert)
{
  /* This assert fails when we have identical instructions
     one of which dominates the other.  In this case move_op ()
     finds the first instruction and doesn't search for the second one.
     The solution would be to compute av_set after the first found
     insn and, if insn is present in that set, continue searching.
     For now we work around this issue in move_op.  */
  gcc_assert (!INSN_IN_STREAM_P (EXPR_INSN_RTX (expr)));

  if (EXPR_WAS_RENAMED (expr))
    {
      unsigned regno = expr_dest_regno (expr);

      if (HARD_REGISTER_NUM_P (regno))
        {
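          /* Bumping reg_rename_tick records REGNO as the most recently
             chosen rename target; the register selection heuristic consults
             these ticks to prefer least recently used registers.  */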
          df_set_regs_ever_live (regno, true);
          reg_rename_tick[regno] = ++reg_rename_this_tick;
        }
    }

  return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
                                       place_to_insert);
}

/* Return TRUE if BB can hold bookkeeping code.  */
static bool
block_valid_for_bookkeeping_p (basic_block bb)
{
  insn_t bb_end = BB_END (bb);

  if (!in_current_region_p (bb) || EDGE_COUNT (bb->succs) > 1)
    return false;

  if (INSN_P (bb_end))
    {
      if (INSN_SCHED_TIMES (bb_end) > 0)
        return false;
    }
  else
    gcc_assert (NOTE_INSN_BASIC_BLOCK_P (bb_end));

  return true;
}

/* Attempt to find a block that can hold bookkeeping code for path(s) incoming
   into E2->dest, except from E1->src (there may be a sequence of empty basic
   blocks between E1->src and E2->dest).  Return found block, or NULL if new
   one must be created.  If LAX holds, don't assume there is a simple path
   from E1->src to E2->dest.  */
static basic_block
find_block_for_bookkeeping (edge e1, edge e2, bool lax)
{
  basic_block candidate_block = NULL;
  edge e;

  /* Loop over edges from E1 to E2, inclusive.  */
  for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun); e =
       EDGE_SUCC (e->dest, 0))
    {
      if (EDGE_COUNT (e->dest->preds) == 2)
        {
          if (candidate_block == NULL)
            candidate_block = (EDGE_PRED (e->dest, 0) == e
                               ? EDGE_PRED (e->dest, 1)->src
                               : EDGE_PRED (e->dest, 0)->src);
          else
            /* Found additional edge leading to path from e1 to e2
               from aside.  */
            return NULL;
        }
      else if (EDGE_COUNT (e->dest->preds) > 2)
        /* Several edges leading to path from e1 to e2 from aside.  */
        return NULL;

      if (e == e2)
        return ((!lax || candidate_block)
                && block_valid_for_bookkeeping_p (candidate_block)
                ? candidate_block
                : NULL);

      if (lax && EDGE_COUNT (e->dest->succs) != 1)
        return NULL;
    }

  if (lax)
    return NULL;

  gcc_unreachable ();
}

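/* For instance, when the only other edge into E2->dest comes from a block B,
   the walk above records B as the candidate; bookkeeping emitted at the end
   of B then covers every path reaching E2->dest that does not go through
   E1->src.  */
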
/* Create new basic block for bookkeeping code for path(s) incoming into
   E2->dest, except from E1->src.  Return created block.  */
static basic_block
create_block_for_bookkeeping (edge e1, edge e2)
{
  basic_block new_bb, bb = e2->dest;

  /* Check that we don't spoil the loop structure.  */
  if (current_loop_nest)
    {
      basic_block latch = current_loop_nest->latch;

      /* We do not split header.  */
      gcc_assert (e2->dest != current_loop_nest->header);

      /* We do not redirect the only edge to the latch block.  */
      gcc_assert (e1->dest != latch
                  || !single_pred_p (latch)
                  || e1 != single_pred_edge (latch));
    }

  /* Split BB to insert BOOK_INSN there.  */
  new_bb = sched_split_block (bb, NULL);

  /* Move note_list from the upper bb.  */
  gcc_assert (BB_NOTE_LIST (new_bb) == NULL_RTX);
  BB_NOTE_LIST (new_bb) = BB_NOTE_LIST (bb);
  BB_NOTE_LIST (bb) = NULL;

  gcc_assert (e2->dest == bb);

  /* Skip block for bookkeeping copy when leaving E1->src.  */
  if (e1->flags & EDGE_FALLTHRU)
    sel_redirect_edge_and_branch_force (e1, new_bb);
  else
    sel_redirect_edge_and_branch (e1, new_bb);

  gcc_assert (e1->dest == new_bb);
  gcc_assert (sel_bb_empty_p (bb));

  /* To keep basic block numbers in sync between debug and non-debug
     compilations, we have to rotate blocks here.  Consider that we
     started from (a,b)->d, (c,d)->e, and d contained only debug
     insns.  It would have been removed before if the debug insns
     weren't there, so we'd have split e rather than d.  So what we do
     now is to swap the block numbers of new_bb and
     single_succ(new_bb) == e, so that the insns that were in e before
     get the new block number.  */

  if (MAY_HAVE_DEBUG_INSNS)
    {
      basic_block succ;
      insn_t insn = sel_bb_head (new_bb);
      insn_t last;

      if (DEBUG_INSN_P (insn)
          && single_succ_p (new_bb)
          && (succ = single_succ (new_bb))
          && succ != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
        {
          while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
            insn = NEXT_INSN (insn);

          if (insn == last)
            {
              sel_global_bb_info_def gbi;
              sel_region_bb_info_def rbi;

              if (sched_verbose >= 2)
                sel_print ("Swapping block ids %i and %i\n",
                           new_bb->index, succ->index);

              std::swap (new_bb->index, succ->index);

              SET_BASIC_BLOCK_FOR_FN (cfun, new_bb->index, new_bb);
              SET_BASIC_BLOCK_FOR_FN (cfun, succ->index, succ);

              memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi));
              memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ),
                      sizeof (gbi));
              memcpy (SEL_GLOBAL_BB_INFO (succ), &gbi, sizeof (gbi));

              memcpy (&rbi, SEL_REGION_BB_INFO (new_bb), sizeof (rbi));
              memcpy (SEL_REGION_BB_INFO (new_bb), SEL_REGION_BB_INFO (succ),
                      sizeof (rbi));
              memcpy (SEL_REGION_BB_INFO (succ), &rbi, sizeof (rbi));

              std::swap (BLOCK_TO_BB (new_bb->index),
                         BLOCK_TO_BB (succ->index));

              std::swap (CONTAINING_RGN (new_bb->index),
                         CONTAINING_RGN (succ->index));

              for (int i = 0; i < current_nr_blocks; i++)
                if (BB_TO_BLOCK (i) == succ->index)
                  BB_TO_BLOCK (i) = new_bb->index;
                else if (BB_TO_BLOCK (i) == new_bb->index)
                  BB_TO_BLOCK (i) = succ->index;

              FOR_BB_INSNS (new_bb, insn)
                if (INSN_P (insn))
                  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;

              FOR_BB_INSNS (succ, insn)
                if (INSN_P (insn))
                  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = succ->index;

              if (bitmap_clear_bit (code_motion_visited_blocks, new_bb->index))
                bitmap_set_bit (code_motion_visited_blocks, succ->index);

              gcc_assert (LABEL_P (BB_HEAD (new_bb))
                          && LABEL_P (BB_HEAD (succ)));

              if (sched_verbose >= 4)
                sel_print ("Swapping code labels %i and %i\n",
                           CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
                           CODE_LABEL_NUMBER (BB_HEAD (succ)));

              std::swap (CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
                         CODE_LABEL_NUMBER (BB_HEAD (succ)));
            }
        }
    }

  return bb;
}

/* Return insn after which we must insert bookkeeping code for path(s) incoming
   into E2->dest, except from E1->src.  If the returned insn immediately
   precedes a fence, assign that fence to *FENCE_TO_REWIND.  */
static insn_t
find_place_for_bookkeeping (edge e1, edge e2, fence_t *fence_to_rewind)
{
  insn_t place_to_insert;
  /* Find a basic block that can hold bookkeeping.  If it can be found, do not
     create a new basic block, but insert the bookkeeping there.  */
  basic_block book_block = find_block_for_bookkeeping (e1, e2, FALSE);

  if (book_block)
    {
      place_to_insert = BB_END (book_block);

      /* Don't use a block containing only debug insns for
         bookkeeping, this causes scheduling differences between debug
         and non-debug compilations, for the block would have been
         removed already.  */
      if (DEBUG_INSN_P (place_to_insert))
        {
          rtx_insn *insn = sel_bb_head (book_block);

          while (insn != place_to_insert &&
                 (DEBUG_INSN_P (insn) || NOTE_P (insn)))
            insn = NEXT_INSN (insn);

          if (insn == place_to_insert)
            book_block = NULL;
        }
    }

  if (!book_block)
    {
      book_block = create_block_for_bookkeeping (e1, e2);
      place_to_insert = BB_END (book_block);
      if (sched_verbose >= 9)
        sel_print ("New block is %i, split from bookkeeping block %i\n",
                   EDGE_SUCC (book_block, 0)->dest->index, book_block->index);
    }
  else
    {
      if (sched_verbose >= 9)
        sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index);
    }

  *fence_to_rewind = NULL;
  /* If basic block ends with a jump, insert bookkeeping code right before it.
     Notice if we are crossing a fence when taking PREV_INSN.  */
  if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert))
    {
      *fence_to_rewind = flist_lookup (fences, place_to_insert);
      place_to_insert = PREV_INSN (place_to_insert);
    }

  return place_to_insert;
}

/* Find a proper seqno for a bookkeeping insn inserted at PLACE_TO_INSERT
   for JOIN_POINT.  */
static int
find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
{
  int seqno;

  /* Check if we are about to insert a bookkeeping copy before a jump, and use
     the jump's seqno for the copy; otherwise, use JOIN_POINT's seqno.  */
  rtx_insn *next = NEXT_INSN (place_to_insert);
  if (INSN_P (next)
      && JUMP_P (next)
      && BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
    {
      gcc_assert (INSN_SCHED_TIMES (next) == 0);
      seqno = INSN_SEQNO (next);
    }
  else if (INSN_SEQNO (join_point) > 0)
    seqno = INSN_SEQNO (join_point);
  else
    {
      seqno = get_seqno_by_preds (place_to_insert);

      /* Sometimes the fences can move in such a way that there will be
         no instructions with positive seqno around this bookkeeping.
         This means that there will be no way to get to it by a regular
         fence movement.  Never mind because we pick up such pieces for
         rescheduling anyways, so any positive value will do for now.  */
      if (seqno < 0)
        {
          gcc_assert (pipelining_p);
          seqno = 1;
        }
    }

  gcc_assert (seqno > 0);
  return seqno;
}

/* Insert a bookkeeping copy of C_EXPR's insn after PLACE_TO_INSERT, assigning
   NEW_SEQNO to it.  Return created insn.  */
static insn_t
emit_bookkeeping_insn (insn_t place_to_insert, expr_t c_expr, int new_seqno)
{
  rtx_insn *new_insn_rtx = create_copy_of_insn_rtx (EXPR_INSN_RTX (c_expr));

  vinsn_t new_vinsn
    = create_vinsn_from_insn_rtx (new_insn_rtx,
                                  VINSN_UNIQUE_P (EXPR_VINSN (c_expr)));

  insn_t new_insn = emit_insn_from_expr_after (c_expr, new_vinsn, new_seqno,
                                               place_to_insert);

  INSN_SCHED_TIMES (new_insn) = 0;
  bitmap_set_bit (current_copies, INSN_UID (new_insn));

  return new_insn;
}

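/* NB: the current_copies bit set above is what
   av_set_could_be_blocked_by_bookkeeping_p consults to tell whether the insn
   that blocked code motion was itself a bookkeeping copy created during the
   current move_op.  */
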
/* Generate a bookkeeping copy of C_EXPR's insn for path(s) incoming into
   E2->dest, except from E1->src (there may be a sequence of empty blocks
   between E1->src and E2->dest).  Return block containing the copy.
   All scheduler data is initialized for the newly created insn.  */
static basic_block
generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2)
{
  insn_t join_point, place_to_insert, new_insn;
  int new_seqno;
  bool need_to_exchange_data_sets;
  fence_t fence_to_rewind;

  if (sched_verbose >= 4)
    sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index,
               e2->dest->index);

  join_point = sel_bb_head (e2->dest);
  place_to_insert = find_place_for_bookkeeping (e1, e2, &fence_to_rewind);
  new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point);
  need_to_exchange_data_sets
    = sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert));

  new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno);

  if (fence_to_rewind)
    FENCE_INSN (fence_to_rewind) = new_insn;

  /* When inserting a bookkeeping insn in a new block, the av sets should be
     as follows: the old basic block (which now holds the bookkeeping) keeps
     the data sets it had before the bookkeeping was generated, while the
     new basic block (which now holds all the other insns of the old block)
     has invalid data sets.  So exchange the data sets for these basic
     blocks, as sel_split_block mistakenly exchanges them in this case.
     This cannot be done earlier, because when a single instruction is added
     to the new basic block, it should hold a NULL lv_set.  */
  if (need_to_exchange_data_sets)
    exchange_data_sets (BLOCK_FOR_INSN (new_insn),
                        BLOCK_FOR_INSN (join_point));

  stat_bookkeeping_copies++;
  return BLOCK_FOR_INSN (new_insn);
}

/* Remove from AV_PTR all insns that may need bookkeeping when scheduling
   on FENCE, but we are unable to copy them.  */
static void
remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator i;

  /* An expression does not need bookkeeping if it is available on all paths
     from current block to original block and current block dominates
     original block.  We check availability on all paths by examining
     EXPR_SPEC; this is not equivalent, because it may be positive even
     if expr is available on all paths (but if expr is not available on
     any path, EXPR_SPEC will be positive).  */

  FOR_EACH_EXPR_1 (expr, i, av_ptr)
    {
      if (!control_flow_insn_p (EXPR_INSN_RTX (expr))
          && (!bookkeeping_p || VINSN_UNIQUE_P (EXPR_VINSN (expr)))
          && (EXPR_SPEC (expr)
              || !EXPR_ORIG_BB_INDEX (expr)
              || !dominated_by_p (CDI_DOMINATORS,
                                  BASIC_BLOCK_FOR_FN (cfun,
                                                      EXPR_ORIG_BB_INDEX (expr)),
                                  BLOCK_FOR_INSN (FENCE_INSN (fence)))))
        {
          if (sched_verbose >= 4)
            sel_print ("Expr %d removed because it would need bookkeeping, which "
                       "cannot be created\n", INSN_UID (EXPR_INSN_RTX (expr)));
          av_set_iter_remove (&i);
        }
    }
}

4846 | /* Moving conditional jump through some instructions. |
4847 | |
4848 | Consider example: |
4849 | |
4850 | ... <- current scheduling point |
4851 | NOTE BASIC BLOCK: <- bb header |
4852 | (p8) add r14=r14+0x9;; |
4853 | (p8) mov [r14]=r23 |
4854 | (!p8) jump L1;; |
4855 | NOTE BASIC BLOCK: |
4856 | ... |
4857 | |
4858 | We can schedule the jump one cycle earlier than the mov, because they
4859 | cannot be executed together, as their predicates are mutually exclusive.
4860 | 
4861 | This is done as follows: first, a new fallthrough basic block is created
4862 | after the jump (this is always possible, because there should already be
4863 | a fallthrough block where control flow goes when the predicate is true -
4864 | in our example; otherwise there would be a dependence between those
4865 | instructions and the jump, and we could not schedule the jump right now);
4866 | next, all instructions between the jump and the current scheduling point
4867 | are moved to this new block. The result is:
4868 | |
4869 | NOTE BASIC BLOCK: |
4870 | (!p8) jump L1 <- current scheduling point |
4871 | NOTE BASIC BLOCK: <- bb header |
4872 | (p8) add r14=r14+0x9;; |
4873 | (p8) mov [r14]=r23 |
4874 | NOTE BASIC BLOCK: |
4875 | ... |
4876 | */ |
4877 | static void |
4878 | move_cond_jump (rtx_insn *insn, bnd_t bnd) |
4879 | { |
4880 | edge ft_edge; |
4881 | basic_block block_from, block_next, block_new, block_bnd, bb; |
4882 | rtx_insn *next, *prev, *link, *head; |
4883 | |
4884 | block_from = BLOCK_FOR_INSN (insn); |
4885 | block_bnd = BLOCK_FOR_INSN (BND_TO (bnd));
4886 | prev = BND_TO (bnd);
4887 | |
4888 | /* The jump being moved must not cross any other jumps or beginnings of
4889 | new basic blocks. The only exception is when we move a jump through
4890 | mutually exclusive insns along fallthru edges. */
4891 | if (flag_checking && block_from != block_bnd)
4892 | { |
4893 | bb = block_from; |
4894 | for (link = PREV_INSN (insn); link != PREV_INSN (prev); |
4895 | link = PREV_INSN (link)) |
4896 | { |
4897 | if (INSN_P (link))
4898 | gcc_assert (sched_insns_conditions_mutex_p (insn, link));
4899 | if (BLOCK_FOR_INSN (link) && BLOCK_FOR_INSN (link) != bb)
4900 | {
4901 | gcc_assert (single_pred (bb) == BLOCK_FOR_INSN (link));
4902 | bb = BLOCK_FOR_INSN (link); |
4903 | } |
4904 | } |
4905 | } |
4906 | |
4907 | /* Jump is moved to the boundary. */ |
4908 | next = PREV_INSN (insn); |
4909 | BND_TO (bnd) = insn;
4910 | |
4911 | ft_edge = find_fallthru_edge_from (block_from); |
4912 | block_next = ft_edge->dest; |
4913 | /* There must be a fallthrough block (otherwise, where would control
4914 | flow go when the jump predicate is false?). */
4915 | gcc_assert (block_next);
4916 | |
4917 | /* Create new empty basic block after source block. */ |
4918 | block_new = sel_split_edge (ft_edge); |
4919 | gcc_assert (block_new->next_bb == block_next
4920 | && block_from->next_bb == block_new);
4921 | |
4922 | /* Move all instructions except INSN to BLOCK_NEW. */ |
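     | /* Walk the blocks from BLOCK_BND up to BLOCK_FROM, appending their
     |    insns (except the jump itself) to BLOCK_NEW and tidying up any
     |    blocks that become empty. */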
4923 | bb = block_bnd; |
4924 | head = BB_HEAD (block_new);
4925 | while (bb != block_from->next_bb) |
4926 | { |
4927 | rtx_insn *from, *to; |
4928 | from = bb == block_bnd ? prev : sel_bb_head (bb); |
4929 | to = bb == block_from ? next : sel_bb_end (bb); |
4930 | |
4931 | /* The jump being moved can be the first insn in the block. |
4932 | In this case we don't have to move anything in this block. */ |
4933 | if (NEXT_INSN (to) != from) |
4934 | { |
4935 | reorder_insns (from, to, head); |
4936 | |
4937 | for (link = to; link != head; link = PREV_INSN (link)) |
4938 | EXPR_ORIG_BB_INDEX (INSN_EXPR (link)) = block_new->index;
4939 | head = to; |
4940 | } |
4941 | |
4942 | /* Cleanup possibly empty blocks left. */ |
4943 | block_next = bb->next_bb; |
4944 | if (bb != block_from) |
4945 | tidy_control_flow (bb, false); |
4946 | bb = block_next; |
4947 | } |
4948 | |
4949 | /* Assert there is no jump to BLOCK_NEW, only fallthrough edge. */ |
4950 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (block_new)));
4951 | 
4952 | gcc_assert (!sel_bb_empty_p (block_from)
4953 | && !sel_bb_empty_p (block_new));
4954 | |
4955 | /* Update data sets for BLOCK_NEW to represent that INSN and
4956 | the instructions from the other branch of INSN are no longer
4957 | available at BLOCK_NEW. */
4958 | BB_AV_LEVEL (block_new) = global_level;
4959 | gcc_assert (BB_LV_SET (block_new) == NULL);
4960 | BB_LV_SET (block_new) = get_clear_regset_from_pool ();
4961 | update_data_sets (sel_bb_head (block_new)); |
4962 | |
4963 | /* INSN is a new basic block header - so prepare its data |
4964 | structures and update availability and liveness sets. */ |
4965 | update_data_sets (insn); |
4966 | |
4967 | if (sched_verbose >= 4) |
4968 | sel_print ("Moving jump %d\n", INSN_UID (insn)); |
4969 | } |
4970 | |
4971 | /* Remove nops generated during move_op for preventing removal of empty |
4972 | basic blocks. */ |
4973 | static void |
4974 | remove_temp_moveop_nops (bool full_tidying) |
4975 | { |
4976 | int i; |
4977 | insn_t insn; |
4978 | |
4979 | FOR_EACH_VEC_ELT (vec_temp_moveop_nops, i, insn)
4980 | {
4981 | gcc_assert (INSN_NOP_P (insn));
4982 | return_nop_to_pool (insn, full_tidying); |
4983 | } |
4984 | |
4985 | /* Empty the vector. */ |
4986 | if (vec_temp_moveop_nops.length () > 0) |
4987 | vec_temp_moveop_nops.block_remove (0, vec_temp_moveop_nops.length ()); |
4988 | } |
4989 | |
4990 | /* Records the maximal UID before moving up an instruction. Used for |
4991 | distinguishing between bookkeeping copies and original insns. */ |
4992 | static int max_uid_before_move_op = 0; |
4993 | |
4994 | /* When true, we're always scheduling the next insn on the already scheduled
4995 | code to get the right insn data for the following bundling or other passes. */
4996 | static int force_next_insn = 0; |
4997 | |
4998 | /* Remove from AV_VLIW_P all instructions but next when debug counter |
4999 | tells us so. Next instruction is fetched from BNDS. */ |
5000 | static void |
5001 | remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p) |
5002 | { |
5003 | if (! dbg_cnt (sel_sched_insn_cnt) || force_next_insn) |
5004 | /* Leave only the next insn in av_vliw. */ |
5005 | { |
5006 | av_set_iterator av_it; |
5007 | expr_t expr; |
5008 | bnd_t bnd = BLIST_BND (bnds);
5009 | insn_t next = BND_TO (bnd);
5010 | 
5011 | gcc_assert (BLIST_NEXT (bnds) == NULL);
5012 | 
5013 | FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p)
5014 | if (EXPR_INSN_RTX (expr) != next)
5015 | av_set_iter_remove (&av_it); |
5016 | } |
5017 | } |
5018 | |
5019 | /* Compute available instructions on BNDS. FENCE is the current fence. Write |
5020 | the computed set to *AV_VLIW_P. */ |
5021 | static void |
5022 | compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p) |
5023 | { |
5024 | if (sched_verbose >= 2) |
5025 | { |
5026 | sel_print ("Boundaries: "); |
5027 | dump_blist (bnds); |
5028 | sel_print ("\n"); |
5029 | } |
5030 | |
5031 | for (; bnds; bnds = BLIST_NEXT (bnds))
5032 | {
5033 | bnd_t bnd = BLIST_BND (bnds);
5034 | av_set_t av1_copy;
5035 | insn_t bnd_to = BND_TO (bnd);
5036 | |
5037 | /* Rewind BND->TO to the basic block header in case some bookkeeping |
5038 | instructions were inserted before BND->TO and it needs to be |
5039 | adjusted. */ |
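     | /* Freshly inserted bookkeeping copies have not been scheduled yet,
     |    so their INSN_SCHED_TIMES is still zero; walk back over them. */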
5040 | if (sel_bb_head_p (bnd_to))
5041 | gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0);
5042 | else
5043 | while (INSN_SCHED_TIMES (PREV_INSN (bnd_to)) == 0)
5044 | {
5045 | bnd_to = PREV_INSN (bnd_to);
5046 | if (sel_bb_head_p (bnd_to))
5047 | break;
5048 | }
5049 | 
5050 | if (BND_TO (bnd) != bnd_to)
5051 | {
5052 | gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
5053 | FENCE_INSN (fence) = bnd_to;
5054 | BND_TO (bnd) = bnd_to;
5055 | }
5056 | 
5057 | av_set_clear (&BND_AV (bnd));
5058 | BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
5059 | 
5060 | av_set_clear (&BND_AV1 (bnd));
5061 | BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));
5062 | 
5063 | moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL);
5064 | 
5065 | av1_copy = av_set_copy (BND_AV1 (bnd));
5066 | av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
5067 | } |
5068 | |
5069 | if (sched_verbose >= 2) |
5070 | { |
5071 | sel_print ("Available exprs (vliw form): "); |
5072 | dump_av_set (*av_vliw_p); |
5073 | sel_print ("\n"); |
5074 | } |
5075 | } |
5076 | |
5077 | /* Calculate the sequential av set on BND corresponding to the EXPR_VLIW |
5078 | expression. When FOR_MOVEOP is true, also replace the register of |
5079 | expressions found with the register from EXPR_VLIW. */ |
5080 | static av_set_t |
5081 | find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop) |
5082 | { |
5083 | av_set_t expr_seq = NULL;
5084 | expr_t expr; |
5085 | av_set_iterator i; |
5086 | |
5087 | FOR_EACH_EXPR (expr, i, BND_AV (bnd))
5088 | {
5089 | if (equal_after_moveup_path_p (expr, NULL, expr_vliw))
5090 | { |
5091 | if (for_moveop) |
5092 | { |
5093 | /* The sequential expression has the right form to pass |
5094 | to move_op except when renaming happened. Put the |
5095 | correct register in EXPR then. */ |
5096 | if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr)))
5097 | {
5098 | if (expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
5099 | {
5100 | replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
5101 | stat_renamed_scheduled++;
5102 | }
5103 | /* Also put the correct TARGET_AVAILABLE bit on the expr.
5104 | This is needed when renaming came up with the original
5105 | register. */
5106 | else if (EXPR_TARGET_AVAILABLE (expr)
5107 | != EXPR_TARGET_AVAILABLE (expr_vliw))
5108 | {
5109 | gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1);
5110 | EXPR_TARGET_AVAILABLE (expr) = 1;
5111 | }
5112 | }
5113 | if (EXPR_WAS_SUBSTITUTED (expr))
5114 | stat_substitutions_total++; |
5115 | } |
5116 | |
5117 | av_set_add (&expr_seq, expr); |
5118 | |
5119 | /* With substitution inside an insn group, it is possible
5120 | that more than one expression in expr_seq will correspond
5121 | to expr_vliw. In this case, choose only one, as attempting
5122 | to move both leads to miscompiles. */
5123 | break; |
5124 | } |
5125 | } |
5126 | |
5127 | if (for_moveop && sched_verbose >= 2) |
5128 | { |
5129 | sel_print ("Best expression(s) (sequential form): "); |
5130 | dump_av_set (expr_seq); |
5131 | sel_print ("\n"); |
5132 | } |
5133 | |
5134 | return expr_seq; |
5135 | } |
5136 | |
5137 | |
5138 | /* Move nop to previous block. */ |
5139 | static void ATTRIBUTE_UNUSED
5140 | move_nop_to_previous_block (insn_t nop, basic_block prev_bb) |
5141 | { |
5142 | insn_t prev_insn, next_insn; |
5143 | |
5144 | gcc_assert (sel_bb_head_p (nop)
5145 | && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
5146 | rtx_note *note = bb_note (BLOCK_FOR_INSN (nop));
5147 | prev_insn = sel_bb_end (prev_bb);
5148 | next_insn = NEXT_INSN (nop);
5149 | gcc_assert (prev_insn != NULL_RTX
5150 | && PREV_INSN (note) == prev_insn);
5151 | |
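     | /* Relink the insn chain as prev_insn -> nop -> note -> next_insn,
     |    making the nop the new end of PREV_BB. */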
5152 | SET_NEXT_INSN (prev_insn) = nop; |
5153 | SET_PREV_INSN (nop) = prev_insn; |
5154 | |
5155 | SET_PREV_INSN (note) = nop; |
5156 | SET_NEXT_INSN (note) = next_insn; |
5157 | |
5158 | SET_NEXT_INSN (nop) = note; |
5159 | SET_PREV_INSN (next_insn) = note; |
5160 | |
5161 | BB_END (prev_bb) = nop;
5162 | BLOCK_FOR_INSN (nop) = prev_bb; |
5163 | } |
5164 | |
5165 | /* Prepare a place to insert the chosen expression on BND. */ |
5166 | static insn_t |
5167 | prepare_place_to_insert (bnd_t bnd) |
5168 | { |
5169 | insn_t place_to_insert; |
5170 | |
5171 | /* Init place_to_insert before calling move_op, as the latter
5172 | can possibly remove BND_TO (bnd). */
5173 | if (/* If this is not the first insn scheduled. */
5174 | BND_PTR (bnd))
5175 | { |
5176 | /* Add it after last scheduled. */ |
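     | /* If only debug insns have been scheduled on this boundary, there is
     |    no real insn to insert after; fall back to inserting before BND_TO
     |    below. */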
5177 | place_to_insert = ILIST_INSN (BND_PTR (bnd));
5178 | if (DEBUG_INSN_P (place_to_insert))
5179 | {
5180 | ilist_t l = BND_PTR (bnd);
5181 | while ((l = ILIST_NEXT (l)) &&
5182 | DEBUG_INSN_P (ILIST_INSN (l)))
5183 | ;
5184 | if (!l)
5185 | place_to_insert = NULL;
5186 | }
5187 | }
5188 | else
5189 | place_to_insert = NULL;
5190 | |
5191 | if (!place_to_insert) |
5192 | { |
5193 | /* Add it before BND_TO. The difference is in the |
5194 | basic block, where INSN will be added. */ |
5195 | place_to_insert = get_nop_from_pool (BND_TO (bnd));
5196 | gcc_assert (BLOCK_FOR_INSN (place_to_insert)
5197 | == BLOCK_FOR_INSN (BND_TO (bnd)));
5198 | } |
5199 | |
5200 | return place_to_insert; |
5201 | } |
5202 | |
5203 | /* Find original instructions for EXPR_SEQ and move it to BND boundary. |
5204 | Return the expression to emit in C_EXPR. */ |
5205 | static bool |
5206 | move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw, |
5207 | av_set_t expr_seq, expr_t c_expr) |
5208 | { |
5209 | bool b, should_move; |
5210 | unsigned book_uid; |
5211 | bitmap_iterator bi; |
5212 | int n_bookkeeping_copies_before_moveop; |
5213 | |
5214 | /* Make a move. This call will remove the original operation,
5215 | insert all necessary bookkeeping instructions and update the
5216 | data sets. After that all we have to do is add the operation
5217 | just before BND_TO (BND). */
5218 | n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies; |
5219 | max_uid_before_move_op = get_max_uid (); |
5220 | bitmap_clear (current_copies); |
5221 | bitmap_clear (current_originators); |
5222 | |
5223 | b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
5224 | get_dest_from_orig_ops (expr_seq), c_expr, &should_move); |
5225 | |
5226 | /* We should be able to find the expression we've chosen for |
5227 | scheduling. */ |
5228 | gcc_assert (b);
5229 | |
5230 | if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop) |
5231 | stat_insns_needed_bookkeeping++; |
5232 | |
5233 | EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
5234 | { |
5235 | unsigned uid; |
5236 | bitmap_iterator bi; |
5237 | |
5238 | /* We allocate these bitmaps lazily. */ |
5239 | if (! INSN_ORIGINATORS_BY_UID (book_uid))
5240 | INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);
5241 | 
5242 | bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
5243 | current_originators);
5244 | 
5245 | /* Transitively add all originators' originators. */
5246 | EXECUTE_IF_SET_IN_BITMAP (current_originators, 0, uid, bi)
5247 | if (INSN_ORIGINATORS_BY_UID (uid))
5248 | bitmap_ior_into (INSN_ORIGINATORS_BY_UID (book_uid),
5249 | INSN_ORIGINATORS_BY_UID (uid));
5250 | } |
5251 | |
5252 | return should_move; |
5253 | } |
5254 | |
5255 | |
5256 | /* Debug a DFA state as an array of bytes. */ |
5257 | static void |
5258 | debug_state (state_t state) |
5259 | { |
5260 | unsigned char *p; |
5261 | unsigned int i, size = dfa_state_size; |
5262 | |
5263 | sel_print ("state (%u):", size); |
5264 | for (i = 0, p = (unsigned char *) state; i < size; i++) |
5265 | sel_print (" %d", p[i]); |
5266 | sel_print ("\n"); |
5267 | } |
5268 | |
5269 | /* Advance state on FENCE with INSN. Return true if INSN is |
5270 | an ASM, and we should advance state once more. */ |
5271 | static bool |
5272 | advance_state_on_fence (fence_t fence, insn_t insn) |
5273 | { |
5274 | bool asm_p; |
5275 | |
5276 | if (recog_memoized (insn) >= 0) |
5277 | { |
5278 | int res; |
5279 | state_t temp_state = alloca (dfa_state_size);
5280 | 
5281 | gcc_assert (!INSN_ASM_P (insn));
5282 | asm_p = false;
5283 | 
5284 | memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
5285 | res = state_transition (FENCE_STATE (fence), insn);
5286 | gcc_assert (res < 0);
5287 | |
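     | /* If the DFA state changed, the insn took an issue slot on the
     |    current cycle. */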
5288 | if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
5289 | {
5290 | FENCE_ISSUED_INSNS (fence)++;
5291 | 
5292 | /* We should never issue more than issue_rate insns. */
5293 | if (FENCE_ISSUED_INSNS (fence) > issue_rate)
5294 | gcc_unreachable ();
5295 | } |
5296 | } |
5297 | else |
5298 | { |
5299 | /* This could be an ASM insn which we'd like to schedule |
5300 | on the next cycle. */ |
5301 | asm_p = INSN_ASM_P (insn);
5302 | if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
5303 | advance_one_cycle (fence);
5304 | }
5305 | 
5306 | if (sched_verbose >= 2)
5307 | debug_state (FENCE_STATE (fence));
5308 | if (!DEBUG_INSN_P (insn))
5309 | FENCE_STARTS_CYCLE_P (fence) = 0;
5310 | FENCE_ISSUE_MORE (fence) = can_issue_more;
5311 | return asm_p; |
5312 | } |
5313 | |
5314 | /* Update FENCE on which INSN was scheduled and this INSN, too. NEED_STALL |
5315 | is nonzero if we need to stall after issuing INSN. */ |
5316 | static void |
5317 | update_fence_and_insn (fence_t fence, insn_t insn, int need_stall) |
5318 | { |
5319 | bool asm_p; |
5320 | |
5321 | /* First, reflect that something is scheduled on this fence. */ |
5322 | asm_p = advance_state_on_fence (fence, insn); |
5323 | FENCE_LAST_SCHEDULED_INSN (fence) = insn;
5324 | vec_safe_push (FENCE_EXECUTING_INSNS (fence), insn);
5325 | if (SCHED_GROUP_P (insn))
5326 | {
5327 | FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
5328 | SCHED_GROUP_P (insn) = 0;
5329 | }
5330 | else
5331 | FENCE_SCHED_NEXT (fence) = NULL;
5332 | if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
5333 | FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;
5334 | |
5335 | /* Set instruction scheduling info. This will be used in bundling, |
5336 | pipelining, tick computations etc. */ |
5337 | ++INSN_SCHED_TIMES (insn);
5338 | EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
5339 | EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence);
5340 | INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
5341 | INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
5342 | |
5343 | /* This does not account for adjust_cost hooks; just add the biggest
5344 | constant the hook may add to the latency. TODO: make this
5345 | a target dependent constant. */
5346 | INSN_READY_CYCLE (insn)
5347 | = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
5348 | ? 1
5349 | : maximal_insn_latency (insn) + 1);
5350 | |
5351 | /* Change these fields last, as they're used above. */ |
5352 | FENCE_AFTER_STALL_P (fence) = 0;
5353 | if (asm_p || need_stall) |
5354 | advance_one_cycle (fence); |
5355 | |
5356 | /* Indicate that we've scheduled something on this fence. */ |
5357 | FENCE_SCHEDULED_P (fence) = true;
5358 | scheduled_something_on_previous_fence = true; |
5359 | |
5360 | /* Print debug information when insn's fields are updated. */ |
5361 | if (sched_verbose >= 2) |
5362 | { |
5363 | sel_print ("Scheduling insn: "); |
5364 | dump_insn_1 (insn, 1); |
5365 | sel_print ("\n"); |
5366 | } |
5367 | } |
5368 | |
5369 | /* Update boundary BND (and, if needed, FENCE) with INSN, remove the |
5370 | old boundary from BNDSP, add new boundaries to BNDS_TAIL_P and |
5371 | return it. */ |
5372 | static blist_t * |
5373 | update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp, |
5374 | blist_t *bnds_tailp) |
5375 | { |
5376 | succ_iterator si; |
5377 | insn_t succ; |
5378 | |
5379 | advance_deps_context (BND_DC (bnd), insn);
5380 | FOR_EACH_SUCC_1 (succ, si, insn,
5381 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
5382 | { |
5383 | ilist_t ptr = ilist_copy (BND_PTR (bnd));
5384 | |
5385 | ilist_add (&ptr, insn); |
5386 | |
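     | /* Do not create a boundary past an ineligible successor just because
     |    a debug insn ends the block. */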
5387 | if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn)
5388 | && is_ineligible_successor (succ, ptr)) |
5389 | { |
5390 | ilist_clear (&ptr);
5391 | continue; |
5392 | } |
5393 | |
5394 | if (FENCE_INSN (fence) == insn && !sel_bb_end_p (insn))
5395 | { |
5396 | if (sched_verbose >= 9) |
5397 | sel_print ("Updating fence insn from %i to %i\n", |
5398 | INSN_UID (insn), INSN_UID (succ)); |
5399 | FENCE_INSN (fence) = succ;
5400 | } |
5401 | blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
5402 | bnds_tailp = &BLIST_NEXT (*bnds_tailp);
5403 | } |
5404 | |
5405 | blist_remove (bndsp); |
5406 | return bnds_tailp; |
5407 | } |
5408 | |
5409 | /* Schedule EXPR_VLIW on BND. Return the insn emitted. */ |
5410 | static insn_t |
5411 | schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno) |
5412 | { |
5413 | av_set_t expr_seq; |
5414 | expr_t c_expr = XALLOCA (expr_def);
5415 | insn_t place_to_insert; |
5416 | insn_t insn; |
5417 | bool should_move; |
5418 | |
5419 | expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true); |
5420 | |
5421 | /* In case of scheduling a jump skipping some other instructions, |
5422 | prepare CFG. After this, jump is at the boundary and can be |
5423 | scheduled as usual insn by MOVE_OP. */ |
5424 | if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
5425 | {
5426 | insn = EXPR_INSN_RTX (expr_vliw);
5427 | 
5428 | /* Speculative jumps are not handled. */
5429 | if (insn != BND_TO (bnd)
5430 | && !sel_insn_is_speculation_check (insn))
5431 | move_cond_jump (insn, bnd); |
5432 | } |
5433 | |
5434 | /* Find a place for C_EXPR to schedule. */ |
5435 | place_to_insert = prepare_place_to_insert (bnd); |
5436 | should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr); |
5437 | clear_expr (c_expr); |
5438 | |
5439 | /* Add the instruction. The corner case to care about is when
5440 | the expr_seq set has more than one expr, and we chose the one that
5441 | is not equal to expr_vliw. Then expr_vliw may be an insn in the
5442 | stream, and we can't use it. Generate a new vinsn. */
5443 | if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
5444 | { |
5445 | vinsn_t vinsn_new; |
5446 | |
5447 | vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
5448 | change_vinsn_in_expr (expr_vliw, vinsn_new); |
5449 | should_move = false; |
5450 | } |
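     | /* Either move the original insn to the boundary, or emit a fresh
     |    insn from the chosen expression. */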
5451 | if (should_move) |
5452 | insn = sel_move_insn (expr_vliw, seqno, place_to_insert); |
5453 | else |
5454 | insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
5455 | place_to_insert); |
5456 | |
5457 | /* Return the nops generated for preserving data sets back
5458 | into the pool. */
5459 | if (INSN_NOP_P (place_to_insert))
5460 | return_nop_to_pool (place_to_insert, !DEBUG_INSN_P (insn));
5461 | remove_temp_moveop_nops (!DEBUG_INSN_P (insn));
5462 | |
5463 | av_set_clear (&expr_seq); |
5464 | |
5465 | /* Save the scheduled expression so as to reset its target availability
5466 | if we meet it later on the same fence. */
5467 | if (EXPR_WAS_RENAMED (expr_vliw))
5468 | vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));
5469 | |
5470 | /* Check that the recent movement didn't destroy the loop
5471 | structure. */
5472 | gcc_assert (!pipelining_p
5473 | || current_loop_nest == NULL
5474 | || loop_latch_edge (current_loop_nest));
5475 | return insn; |
5476 | } |
5477 | |
5478 | /* Stall for N cycles on FENCE. */ |
5479 | static void |
5480 | stall_for_cycles (fence_t fence, int n) |
5481 | { |
5482 | int could_more; |
5483 | |
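     | /* If we stall for more than one cycle, or the current cycle is not
     |    yet fully issued, new insns may become issuable after the stall;
     |    record this in FENCE_AFTER_STALL_P. */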
5484 | could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
5485 | while (n--) |
5486 | advance_one_cycle (fence); |
5487 | if (could_more) |
5488 | FENCE_AFTER_STALL_P (fence) = 1;
5489 | } |
5490 | |
5491 | /* Gather a parallel group of insns at FENCE and assign their seqno |
5492 | to SEQNO. All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP |
5493 | list for later recalculation of seqnos. */ |
5494 | static void |
5495 | fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp) |
5496 | { |
5497 | blist_t bnds = NULL, *bnds_tailp;
5498 | av_set_t av_vliw = NULL;
5499 | insn_t insn = FENCE_INSN (fence);
5500 | 
5501 | if (sched_verbose >= 2)
5502 | sel_print ("Starting fill_insns for insn %d, cycle %d\n",
5503 | INSN_UID (insn), FENCE_CYCLE (fence));
5504 | 
5505 | blist_add (&bnds, insn, NULL, FENCE_DC (fence));
5506 | bnds_tailp = &BLIST_NEXT (bnds);
5507 | set_target_context (FENCE_TC (fence));
5508 | can_issue_more = FENCE_ISSUE_MORE (fence);
5509 | target_bb = INSN_BB (insn);
5510 | |
5511 | /* Do while we can add any operation to the current group. */ |
5512 | do |
5513 | { |
5514 | blist_t *bnds_tailp1, *bndsp; |
5515 | expr_t expr_vliw; |
5516 | int need_stall = false; |
5517 | int was_stall = 0, scheduled_insns = 0; |
5518 | int max_insns = pipelining_p ? issue_rate : 2 * issue_rate; |
5519 | int max_stall = pipelining_p ? 1 : 3; |
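     | /* When pipelining, use tighter caps so that we break out and
     |    recompute av sets sooner, bringing in insns from the next
     |    iteration. */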
5520 | bool last_insn_was_debug = false; |
5521 | bool was_debug_bb_end_p = false; |
5522 | |
5523 | compute_av_set_on_boundaries (fence, bnds, &av_vliw); |
5524 | remove_insns_that_need_bookkeeping (fence, &av_vliw); |
5525 | remove_insns_for_debug (bnds, &av_vliw); |
5526 | |
5527 | /* Return early if we have nothing to schedule. */ |
5528 | if (av_vliw == NULL)
5529 | break; |
5530 | |
5531 | /* Choose the best expression and, if needed, destination register |
5532 | for it. */ |
5533 | do |
5534 | { |
5535 | expr_vliw = find_best_expr (&av_vliw, bnds, fence, &need_stall); |
5536 | if (! expr_vliw && need_stall) |
5537 | { |
5538 | /* All expressions required a stall. Do not recompute av sets |
5539 | as we'll get the same answer (modulo the insns between |
5540 | the fence and its boundary, which will not be available for |
5541 | pipelining). |
5542 | If we are going to stall for too long, break to recompute av |
5543 | sets and bring more insns for pipelining. */ |
5544 | was_stall++; |
5545 | if (need_stall <= 3) |
5546 | stall_for_cycles (fence, need_stall); |
5547 | else |
5548 | { |
5549 | stall_for_cycles (fence, 1); |
5550 | break; |
5551 | } |
5552 | } |
5553 | } |
5554 | while (! expr_vliw && need_stall); |
5555 | |
5556 | /* Now either we've selected expr_vliw or we have nothing to schedule. */ |
5557 | if (!expr_vliw) |
5558 | { |
5559 | av_set_clear (&av_vliw); |
5560 | break; |
5561 | } |
5562 | |
5563 | bndsp = &bnds; |
5564 | bnds_tailp1 = bnds_tailp; |
5565 | |
5566 | do |
5567 | /* This code will be executed only once until we have several
5568 | boundaries per fence. */
5569 | { |
5570 | bnd_t bnd = BLIST_BND (*bndsp);
5571 | 
5572 | if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
5573 | {
5574 | bndsp = &BLIST_NEXT (*bndsp);
5575 | continue;
5576 | }
5577 | 
5578 | insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
5579 | last_insn_was_debug = DEBUG_INSN_P (insn);
5580 | if (last_insn_was_debug)
5581 | was_debug_bb_end_p = (insn == BND_TO (bnd) && sel_bb_end_p (insn));
5582 | update_fence_and_insn (fence, insn, need_stall); |
5583 | bnds_tailp = update_boundaries (fence, bnd, insn, bndsp, bnds_tailp); |
5584 | |
5585 | /* Add insn to the list of scheduled on this cycle instructions. */ |
5586 | ilist_add (*scheduled_insns_tailpp, insn); |
5587 | *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
5588 | } |
5589 | while (*bndsp != *bnds_tailp1); |
5590 | |
5591 | av_set_clear (&av_vliw); |
5592 | if (!last_insn_was_debug) |
5593 | scheduled_insns++; |
5594 | |
5595 | /* We currently support information about candidate blocks only for
5596 | one 'target_bb' block. Hence we can't schedule after a jump insn,
5597 | as this would bring two boundaries and, hence, the necessity to handle
5598 | information for two or more blocks concurrently. */
5599 | if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn)) |
5600 | || (was_stall |
5601 | && (was_stall >= max_stall |
5602 | || scheduled_insns >= max_insns))) |
5603 | break; |
5604 | } |
5605 | while (bnds); |
5606 | |
5607 | gcc_assert (!FENCE_BNDS (fence));
5608 | |
5609 | /* Update boundaries of the FENCE. */ |
5610 | while (bnds) |
5611 | { |
5612 | ilist_t ptr = BND_PTR (BLIST_BND (bnds));
5613 | |
5614 | if (ptr) |
5615 | { |
5616 | insn = ILIST_INSN (ptr);
5617 | 
5618 | if (!ilist_is_in_p (FENCE_BNDS (fence), insn))
5619 | ilist_add (&FENCE_BNDS (fence), insn);
5620 | } |
5621 | |
5622 | blist_remove (&bnds); |
5623 | } |
5624 | |
5625 | /* Update target context on the fence. */ |
5626 | reset_target_context (FENCE_TC (fence), false);
5627 | } |
5628 | |
5629 | /* All exprs in ORIG_OPS must have the same destination register or memory. |
5630 | Return that destination. */ |
5631 | static rtx |
5632 | get_dest_from_orig_ops (av_set_t orig_ops) |
5633 | { |
5634 | rtx dest = NULL_RTX;
5635 | av_set_iterator av_it; |
5636 | expr_t expr; |
5637 | bool first_p = true; |
5638 | |
5639 | FOR_EACH_EXPR (expr, av_it, orig_ops)
5640 | {
5641 | rtx x = EXPR_LHS (expr);
5642 | |
5643 | if (first_p) |
5644 | { |
5645 | first_p = false; |
5646 | dest = x; |
5647 | } |
5648 | else |
5649 | gcc_assert (dest == x
5650 | || (dest != NULL_RTX && x != NULL_RTX
5651 | && rtx_equal_p (dest, x)));
5652 | } |
5653 | |
5654 | return dest; |
5655 | } |
5656 | |
5657 | /* Update data sets for the bookkeeping block and record those expressions |
5658 | which become no longer available after inserting this bookkeeping. */ |
5659 | static void |
5660 | update_and_record_unavailable_insns (basic_block book_block) |
5661 | { |
5662 | av_set_iterator i; |
5663 | av_set_t old_av_set = NULL;
5664 | expr_t cur_expr; |
5665 | rtx_insn *bb_end = sel_bb_end (book_block); |
5666 | |
5667 | /* First, get correct liveness in the bookkeeping block. The problem is
5668 | the range between the bookkeeping insn and the end of the block. */
5669 | update_liveness_on_insn (bb_end); |
5670 | if (control_flow_insn_p (bb_end)) |
5671 | update_liveness_on_insn (PREV_INSN (bb_end)); |
5672 | |
5673 | /* If there's valid av_set on BOOK_BLOCK, then there might exist another |
5674 | fence above, where we may choose to schedule an insn which is |
5675 | actually blocked from moving up with the bookkeeping we create here. */ |
5676 | if (AV_SET_VALID_P (sel_bb_head (book_block)))
5677 | {
5678 | old_av_set = av_set_copy (BB_AV_SET (book_block));
5679 | update_data_sets (sel_bb_head (book_block));
5680 | 
5681 | /* Traverse all the expressions in the old av_set and check whether
5682 | CUR_EXPR is in the new AV_SET. */
5683 | FOR_EACH_EXPR (cur_expr, i, old_av_set)
5684 | {
5685 | expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
5686 | EXPR_VINSN (cur_expr));
5687 | |
5688 | if (! new_expr
5689 | /* In this case, we can just turn off the E_T_A bit, but we can't
5690 | represent this information with the current vector. */
5691 | || EXPR_TARGET_AVAILABLE (new_expr)
5692 | != EXPR_TARGET_AVAILABLE (cur_expr))
5693 | /* Unfortunately, the code below could also be fired up on
5694 | separable insns, e.g. when moving insns through the new
5695 | speculation check as in PR 53701. */
5696 | vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr);
5697 | } |
5698 | |
5699 | av_set_clear (&old_av_set); |
5700 | } |
5701 | } |
5702 | |
5703 | /* The main effect of this function is that sparams->c_expr is merged
5704 | with (or copied to) lparams->c_expr_merged. If there's only one successor,
5705 | we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged.
5706 | lparams->c_expr_merged is copied back to sparams->c_expr after all
5707 | successors have been traversed. lparams->c_expr_local is an expr allocated
5708 | on the stack in the caller function, and is used if there is more than one
5709 | successor.
5710 | |
5711 | SUCC is one of the SUCCS_NORMAL successors of INSN, |
5712 | MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on succ,
5713 | LPARAMS and STATIC_PARAMS contain the parameters described above. */ |
5714 | static void |
5715 | move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
5716 | insn_t succ ATTRIBUTE_UNUSED,
5717 | int moveop_drv_call_res, |
5718 | cmpd_local_params_p lparams, void *static_params) |
5719 | { |
5720 | moveop_static_params_p sparams = (moveop_static_params_p) static_params; |
5721 | |
5722 | /* Nothing to do, if original expr wasn't found below. */ |
5723 | if (moveop_drv_call_res != 1) |
5724 | return; |
5725 | |
5726 | /* If this is the first successor. */
5727 | if (!lparams->c_expr_merged) |
5728 | { |
5729 | lparams->c_expr_merged = sparams->c_expr; |
5730 | sparams->c_expr = lparams->c_expr_local; |
5731 | } |
5732 | else |
5733 | { |
5734 | /* We must merge all found expressions to get reasonable |
5735 | EXPR_SPEC_DONE_DS for the resulting insn. If we don't |
5736 | do so then we can first find the expr with epsilon |
5737 | speculation success probability and only then with the |
5738 | good probability. As a result the insn will get epsilon |
5739 | probability and will never be scheduled because of |
5740 | weakness_cutoff in find_best_expr. |
5741 | |
5742 | We call merge_expr_data here instead of merge_expr |
5743 | because due to speculation C_EXPR and X may have the |
5744 | same insns with different speculation types. And as of |
5745 | now such insns are considered non-equal. |
5746 | |
5747 | However, EXPR_SCHED_TIMES is different -- we must get |
5748 | SCHED_TIMES from a real insn, not a bookkeeping copy. |
5749 | We force this here. Instead, we may consider merging |
5750 | SCHED_TIMES to the maximum instead of minimum in the |
5751 | below function. */ |
5752 | int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged);
5753 | 
5754 | merge_expr_data (lparams->c_expr_merged, sparams->c_expr, NULL);
5755 | if (EXPR_SCHED_TIMES (sparams->c_expr) == 0)
5756 | EXPR_SCHED_TIMES (lparams->c_expr_merged) = old_times;
5757 | |
5758 | clear_expr (sparams->c_expr); |
5759 | } |
5760 | } |
5761 | |
5762 | /* Add used regs for the successor SUCC into SPARAMS->USED_REGS. |
5763 | |
5764 | SUCC is one of the SUCCS_NORMAL successors of INSN, |
5765 | MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on
5766 | succ, or 0 if SUCC is one of SUCCS_BACK or SUCCS_OUT.
5767 | STATIC_PARAMS contain USED_REGS set. */ |
5768 | static void |
5769 | fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
5770 | int moveop_drv_call_res,
5771 | cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
5772 | void *static_params) |
5773 | { |
5774 | regset succ_live; |
5775 | fur_static_params_p sparams = (fur_static_params_p) static_params; |
5776 | |
5777 | /* Here we compute live regsets only for branches that do not lie |
5778 | on the code motion paths. These branches correspond to value |
5779 | MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though |
5780 | for such branches code_motion_path_driver is not called. */ |
5781 | if (moveop_drv_call_res != 0) |
5782 | return; |
5783 | |
5784 | /* Mark all registers that do not meet the following condition: |
5785 | (3) not live on the other path of any conditional branch |
5786 | that is passed by the operation, in case original |
5787 | operations are not present on both paths of the |
5788 | conditional branch. */ |
5789 | succ_live = compute_live (succ); |
5790 | IOR_REG_SET (sparams->used_regs, succ_live);
5791 | } |
5792 | |
5793 | /* This function is called after the last successor. Copies LP->C_EXPR_MERGED
5794 | into SP->C_EXPR. */
5795 | static void |
5796 | move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams) |
5797 | { |
5798 | moveop_static_params_p sp = (moveop_static_params_p) sparams; |