File: | build/gcc/sched-deps.cc |
Warning: | line 2958, column 8 Value stored to 'code' is never read |
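The warning above is the analyzer's dead-store check: a value is assigned to 'code' at line 2958 but never read before it is overwritten or goes out of scope. That line is outside this excerpt; the two lines below are only a hypothetical sketch of the kind of pattern the checker flags, not the actual code from sched-deps.cc.
    enum rtx_code code = GET_CODE (x);        /* value stored here ...                       */
    code = GET_CODE (XEXP (x, 0));            /* ... is overwritten before any read (dead store) */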
1 | /* Instruction scheduling pass. This file computes dependencies between |
2 | instructions. |
3 | Copyright (C) 1992-2023 Free Software Foundation, Inc. |
4 | Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, |
5 | and currently maintained by, Jim Wilson (wilson@cygnus.com) |
6 | |
7 | This file is part of GCC. |
8 | |
9 | GCC is free software; you can redistribute it and/or modify it under |
10 | the terms of the GNU General Public License as published by the Free |
11 | Software Foundation; either version 3, or (at your option) any later |
12 | version. |
13 | |
14 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
15 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
16 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
17 | for more details. |
18 | |
19 | You should have received a copy of the GNU General Public License |
20 | along with GCC; see the file COPYING3. If not see |
21 | <http://www.gnu.org/licenses/>. */ |
22 | |
23 | #include "config.h" |
24 | #include "system.h" |
25 | #include "coretypes.h" |
26 | #include "backend.h" |
27 | #include "target.h" |
28 | #include "rtl.h" |
29 | #include "tree.h" |
30 | #include "df.h" |
31 | #include "insn-config.h" |
32 | #include "regs.h" |
33 | #include "memmodel.h" |
34 | #include "ira.h" |
35 | #include "ira-int.h" |
36 | #include "insn-attr.h" |
37 | #include "cfgbuild.h" |
38 | #include "sched-int.h" |
39 | #include "cselib.h" |
40 | #include "function-abi.h" |
41 | |
42 | #ifdef INSN_SCHEDULING |
43 | |
44 | /* Holds current parameters for the dependency analyzer. */ |
45 | struct sched_deps_info_def *sched_deps_info; |
46 | |
47 | /* The data is specific to the Haifa scheduler. */ |
48 | vec<haifa_deps_insn_data_def> |
49 | h_d_i_d = vNULL; |
50 | |
51 | /* Return the major type present in the DS. */ |
52 | enum reg_note |
53 | ds_to_dk (ds_t ds) |
54 | { |
55 | if (ds & DEP_TRUE) |
56 | return REG_DEP_TRUE; |
57 | |
58 | if (ds & DEP_OUTPUT) |
59 | return REG_DEP_OUTPUT; |
60 | |
61 | if (ds & DEP_CONTROL) |
62 | return REG_DEP_CONTROL; |
63 | |
64 | gcc_assert (ds & DEP_ANTI); |
65 | |
66 | return REG_DEP_ANTI; |
67 | } |
68 | |
69 | /* Return equivalent dep_status. */ |
70 | ds_t |
71 | dk_to_ds (enum reg_note dk) |
72 | { |
73 | switch (dk) |
74 | { |
75 | case REG_DEP_TRUE: |
76 | return DEP_TRUE; |
77 | |
78 | case REG_DEP_OUTPUT: |
79 | return DEP_OUTPUT; |
80 | |
81 | case REG_DEP_CONTROL: |
82 | return DEP_CONTROL; |
83 | |
84 | default: |
85 | gcc_assert (dk == REG_DEP_ANTI); |
86 | return DEP_ANTI; |
87 | } |
88 | } |
89 | |
90 | /* Functions to operate with dependence information container - dep_t. */ |
91 | |
92 | /* Init DEP with the arguments. */ |
93 | void |
94 | init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds) |
95 | { |
96 | DEP_PRO (dep) = pro; |
97 | DEP_CON (dep) = con; |
98 | DEP_TYPE (dep) = type; |
99 | DEP_STATUS (dep) = ds; |
100 | DEP_COST (dep) = UNKNOWN_DEP_COST; |
101 | DEP_NONREG (dep) = 0; |
102 | DEP_MULTIPLE (dep) = 0; |
103 | DEP_REPLACE (dep) = NULL; |
104 | dep->unused = 0; |
105 | } |
106 | |
107 | /* Init DEP with the arguments. |
108 | While most of the scheduler (including targets) only need the major type |
109 | of the dependency, it is convenient to hide full dep_status from them. */ |
110 | void |
111 | init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind) |
112 | { |
113 | ds_t ds; |
114 | |
115 | if ((current_sched_info->flags & USE_DEPS_LIST)) |
116 | ds = dk_to_ds (kind); |
117 | else |
118 | ds = 0; |
119 | |
120 | init_dep_1 (dep, pro, con, kind, ds); |
121 | } |
122 | |
123 | /* Make a copy of FROM in TO. */ |
124 | static void |
125 | copy_dep (dep_t to, dep_t from) |
126 | { |
127 | memcpy (to, from, sizeof (*to)); |
128 | } |
129 | |
130 | static void dump_ds (FILE *, ds_t); |
131 | |
132 | /* Define flags for dump_dep (). */ |
133 | |
134 | /* Dump producer of the dependence. */ |
135 | #define DUMP_DEP_PRO (2) |
136 | |
137 | /* Dump consumer of the dependence. */ |
138 | #define DUMP_DEP_CON (4) |
139 | |
140 | /* Dump type of the dependence. */ |
141 | #define DUMP_DEP_TYPE (8) |
142 | |
143 | /* Dump status of the dependence. */ |
144 | #define DUMP_DEP_STATUS (16) |
145 | |
146 | /* Dump all information about the dependence. */ |
147 | #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \ |
148 | |DUMP_DEP_STATUS) |
149 | |
150 | /* Dump DEP to DUMP. |
151 | FLAGS is a bit mask specifying what information about DEP needs |
152 | to be printed. |
153 | If FLAGS has the very first bit set, then dump all information about DEP |
154 | and propagate this bit into the callee dump functions. */ |
155 | static void |
156 | dump_dep (FILE *dump, dep_t dep, int flags) |
157 | { |
158 | if (flags & 1) |
159 | flags |= DUMP_DEP_ALL; |
160 | |
161 | fprintf (dump, "<"); |
162 | |
163 | if (flags & DUMP_DEP_PRO) |
164 | fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep))); |
165 | |
166 | if (flags & DUMP_DEP_CON) |
167 | fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep))); |
168 | |
169 | if (flags & DUMP_DEP_TYPE) |
170 | { |
171 | char t; |
172 | enum reg_note type = DEP_TYPE (dep); |
173 | |
174 | switch (type) |
175 | { |
176 | case REG_DEP_TRUE: |
177 | t = 't'; |
178 | break; |
179 | |
180 | case REG_DEP_OUTPUT: |
181 | t = 'o'; |
182 | break; |
183 | |
184 | case REG_DEP_CONTROL: |
185 | t = 'c'; |
186 | break; |
187 | |
188 | case REG_DEP_ANTI: |
189 | t = 'a'; |
190 | break; |
191 | |
192 | default: |
193 | gcc_unreachable (); |
194 | break; |
195 | } |
196 | |
197 | fprintf (dump, "%c; ", t); |
198 | } |
199 | |
200 | if (flags & DUMP_DEP_STATUS) |
201 | { |
202 | if (current_sched_info->flags & USE_DEPS_LIST) |
203 | dump_ds (dump, DEP_STATUS (dep)); |
204 | } |
205 | |
206 | fprintf (dump, ">"); |
207 | } |
208 | |
209 | /* Default flags for dump_dep (). */ |
210 | static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON); |
211 | |
212 | /* Dump all fields of DEP to STDERR. */ |
213 | void |
214 | sd_debug_dep (dep_t dep) |
215 | { |
216 | dump_dep (stderr, dep, 1); |
217 | fprintf (stderr, "\n"); |
218 | } |
219 | |
220 | /* Determine whether DEP is a dependency link of a non-debug insn on a |
221 | debug insn. */ |
222 | |
223 | static inline bool |
224 | depl_on_debug_p (dep_link_t dep) |
225 | { |
226 | return (DEBUG_INSN_P (DEP_LINK_PRO (dep)) |
227 | && !DEBUG_INSN_P (DEP_LINK_CON (dep))); |
228 | } |
229 | |
230 | /* Functions to operate with a single link from the dependencies lists - |
231 | dep_link_t. */ |
232 | |
233 | /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by |
234 | PREV_NEXT_P. */ |
235 | static void |
236 | attach_dep_link (dep_link_t l, dep_link_t *prev_nextp) |
237 | { |
238 | dep_link_t next = *prev_nextp; |
239 | |
240 | gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL |
241 | && DEP_LINK_NEXT (l) == NULL); |
242 | |
243 | /* Init node being inserted. */ |
244 | DEP_LINK_PREV_NEXTP (l) = prev_nextp; |
245 | DEP_LINK_NEXT (l) = next; |
246 | |
247 | /* Fix next node. */ |
248 | if (next != NULL) |
249 | { |
250 | gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp); |
251 | |
252 | DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l); |
253 | } |
254 | |
255 | /* Fix prev node. */ |
256 | *prev_nextp = l; |
257 | } |
258 | |
259 | /* Add dep_link LINK to deps_list L. */ |
260 | static void |
261 | add_to_deps_list (dep_link_t link, deps_list_t l) |
262 | { |
263 | attach_dep_link (link, &DEPS_LIST_FIRST (l)); |
264 | |
265 | /* Don't count debug deps. */ |
266 | if (!depl_on_debug_p (link)) |
267 | ++DEPS_LIST_N_LINKS (l); |
268 | } |
269 | |
270 | /* Detach dep_link L from the list. */ |
271 | static void |
272 | detach_dep_link (dep_link_t l) |
273 | { |
274 | dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l); |
275 | dep_link_t next = DEP_LINK_NEXT (l); |
276 | |
277 | *prev_nextp = next; |
278 | |
279 | if (next != NULL) |
280 | DEP_LINK_PREV_NEXTP (next) = prev_nextp; |
281 | |
282 | DEP_LINK_PREV_NEXTP (l) = NULL; |
283 | DEP_LINK_NEXT (l) = NULL; |
284 | } |
285 | |
286 | /* Remove link LINK from list LIST. */ |
287 | static void |
288 | remove_from_deps_list (dep_link_t link, deps_list_t list) |
289 | { |
290 | detach_dep_link (link); |
291 | |
292 | /* Don't count debug deps. */ |
293 | if (!depl_on_debug_p (link)) |
294 | --DEPS_LIST_N_LINKS (list); |
295 | } |
296 | |
297 | /* Move link LINK from list FROM to list TO. */ |
298 | static void |
299 | move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to) |
300 | { |
301 | remove_from_deps_list (link, from); |
302 | add_to_deps_list (link, to); |
303 | } |
304 | |
305 | /* Return true if LINK is not attached to any list. */ |
306 | static bool |
307 | dep_link_is_detached_p (dep_link_t link) |
308 | { |
309 | return DEP_LINK_PREV_NEXTP (link) == NULL; |
310 | } |
311 | |
312 | /* Pool to hold all dependency nodes (dep_node_t). */ |
313 | static object_allocator<_dep_node> *dn_pool; |
314 | |
315 | /* Number of dep_nodes out there. */ |
316 | static int dn_pool_diff = 0; |
317 | |
318 | /* Create a dep_node. */ |
319 | static dep_node_t |
320 | create_dep_node (void) |
321 | { |
322 | dep_node_t n = dn_pool->allocate (); |
323 | dep_link_t back = DEP_NODE_BACK (n); |
324 | dep_link_t forw = DEP_NODE_FORW (n); |
325 | |
326 | DEP_LINK_NODE (back) = n; |
327 | DEP_LINK_NEXT (back) = NULL; |
328 | DEP_LINK_PREV_NEXTP (back) = NULL; |
329 | |
330 | DEP_LINK_NODE (forw) = n; |
331 | DEP_LINK_NEXT (forw) = NULL; |
332 | DEP_LINK_PREV_NEXTP (forw) = NULL; |
333 | |
334 | ++dn_pool_diff; |
335 | |
336 | return n; |
337 | } |
338 | |
339 | /* Delete dep_node N. N must not be connected to any deps_list. */ |
340 | static void |
341 | delete_dep_node (dep_node_t n) |
342 | { |
343 | gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n)) |
344 | && dep_link_is_detached_p (DEP_NODE_FORW (n))); |
345 | |
346 | XDELETE (DEP_REPLACE (DEP_NODE_DEP (n))); |
347 | |
348 | --dn_pool_diff; |
349 | |
350 | dn_pool->remove (n); |
351 | } |
352 | |
353 | /* Pool to hold dependencies lists (deps_list_t). */ |
354 | static object_allocator<_deps_list> *dl_pool; |
355 | |
356 | /* Number of deps_lists out there. */ |
357 | static int dl_pool_diff = 0; |
358 | |
359 | /* Functions to operate with dependences lists - deps_list_t. */ |
360 | |
361 | /* Return true if list L is empty. */ |
362 | static bool |
363 | deps_list_empty_p (deps_list_t l) |
364 | { |
365 | return DEPS_LIST_N_LINKS (l) == 0; |
366 | } |
367 | |
368 | /* Create a new deps_list. */ |
369 | static deps_list_t |
370 | create_deps_list (void) |
371 | { |
372 | deps_list_t l = dl_pool->allocate (); |
373 | |
374 | DEPS_LIST_FIRST (l) = NULL; |
375 | DEPS_LIST_N_LINKS (l) = 0; |
376 | |
377 | ++dl_pool_diff; |
378 | return l; |
379 | } |
380 | |
381 | /* Free deps_list L. */ |
382 | static void |
383 | free_deps_list (deps_list_t l) |
384 | { |
385 | gcc_assert (deps_list_empty_p (l)); |
386 | |
387 | --dl_pool_diff; |
388 | |
389 | dl_pool->remove (l); |
390 | } |
391 | |
392 | /* Return true if there are no dep_nodes and deps_lists out there. |
393 | After the region is scheduled all the dependency nodes and lists |
394 | should [generally] be returned to pool. */ |
395 | bool |
396 | deps_pools_are_empty_p (void) |
397 | { |
398 | return dn_pool_diff == 0 && dl_pool_diff == 0; |
399 | } |
400 | |
401 | /* Remove all elements from L. */ |
402 | static void |
403 | clear_deps_list (deps_list_t l) |
404 | { |
405 | do |
406 | { |
407 | dep_link_t link = DEPS_LIST_FIRST (l); |
408 | |
409 | if (link == NULL) |
410 | break; |
411 | |
412 | remove_from_deps_list (link, l); |
413 | } |
414 | while (1); |
415 | } |
416 | |
417 | /* Decide whether a dependency should be treated as a hard or a speculative |
418 | dependency. */ |
419 | static bool |
420 | dep_spec_p (dep_t dep) |
421 | { |
422 | if (current_sched_info->flags & DO_SPECULATION) |
423 | { |
424 | if (DEP_STATUS (dep) & SPECULATIVE) |
425 | return true; |
426 | } |
427 | if (current_sched_info->flags & DO_PREDICATION) |
428 | { |
429 | if (DEP_TYPE (dep) == REG_DEP_CONTROL) |
430 | return true; |
431 | } |
432 | if (DEP_REPLACE (dep) != NULL) |
433 | return true; |
434 | return false; |
435 | } |
436 | |
437 | static regset reg_pending_sets; |
438 | static regset reg_pending_clobbers; |
439 | static regset reg_pending_uses; |
440 | static regset reg_pending_control_uses; |
441 | static enum reg_pending_barrier_mode reg_pending_barrier; |
442 | |
443 | /* Hard registers implicitly clobbered or used (or may be implicitly |
444 | clobbered or used) by the currently analyzed insn. For example, |
445 | insn in its constraint has one register class. Even if there is |
446 | currently no hard register in the insn, the particular hard |
447 | register will be in the insn after reload pass because the |
448 | constraint requires it. */ |
449 | static HARD_REG_SET implicit_reg_pending_clobbers; |
450 | static HARD_REG_SET implicit_reg_pending_uses; |
451 | |
452 | /* To speed up the test for duplicate dependency links we keep a |
453 | record of dependencies created by add_dependence when the average |
454 | number of instructions in a basic block is very large. |
455 | |
456 | Studies have shown that there are typically around 5 instructions between |
457 | branches for typical C code. So we can make a guess that the average |
458 | basic block is approximately 5 instructions long; we will choose 100X |
459 | the average size as a very large basic block. |
460 | |
461 | Each insn has associated bitmaps for its dependencies. Each bitmap |
462 | has enough entries to represent a dependency on any other insn in |
463 | the insn chain. If the bitmap for the true dependency cache is |
464 | allocated, then the remaining caches are also allocated. */ |
465 | static bitmap true_dependency_cache = NULL; |
466 | static bitmap output_dependency_cache = NULL; |
467 | static bitmap anti_dependency_cache = NULL; |
468 | static bitmap control_dependency_cache = NULL; |
469 | static bitmap spec_dependency_cache = NULL; |
470 | static int cache_size; |
471 | |
472 | /* True if we should mark added dependencies as non-register deps. */ |
473 | static bool mark_as_hard; |
474 | |
475 | static int deps_may_trap_p (const_rtx); |
476 | static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note); |
477 | static void add_dependence_list (rtx_insn *, rtx_insn_list *, int, |
478 | enum reg_note, bool); |
479 | static void add_dependence_list_and_free (class deps_desc *, rtx_insn *, |
480 | rtx_insn_list **, int, enum reg_note, |
481 | bool); |
482 | static void delete_all_dependences (rtx_insn *); |
483 | static void chain_to_prev_insn (rtx_insn *); |
484 | |
485 | static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int); |
486 | static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *); |
487 | static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *); |
488 | static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *); |
489 | |
490 | static bool sched_has_condition_p (const rtx_insn *); |
491 | static int conditions_mutex_p (const_rtx, const_rtx, bool, bool); |
492 | |
493 | static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool, |
494 | rtx, rtx); |
495 | static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx); |
496 | |
497 | static void check_dep (dep_t, bool); |
498 | |
499 | |
500 | /* Return nonzero if a load of the memory reference MEM can cause a trap. */ |
501 | |
502 | static int |
503 | deps_may_trap_p (const_rtx mem) |
504 | { |
505 | const_rtx addr = XEXP (mem, 0); |
506 | |
507 | if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER) |
508 | { |
509 | const_rtx t = get_reg_known_value (REGNO (addr)); |
510 | if (t) |
511 | addr = t; |
512 | } |
513 | return rtx_addr_can_trap_p (addr); |
514 | } |
515 | |
516 | |
517 | /* Find the condition under which INSN is executed. If REV is not NULL, |
518 | it is set to TRUE when the returned comparison should be reversed |
519 | to get the actual condition. */ |
520 | static rtx |
521 | sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev) |
522 | { |
523 | rtx pat = PATTERN (insn); |
524 | rtx src; |
525 | |
526 | if (rev) |
527 | *rev = false; |
528 | |
529 | if (GET_CODE (pat) == COND_EXEC) |
530 | return COND_EXEC_TEST (pat); |
531 | |
532 | if (!any_condjump_p (insn) || !onlyjump_p (insn)) |
533 | return 0; |
534 | |
535 | src = SET_SRC (pc_set (insn)); |
536 | |
537 | if (XEXP (src, 2) == pc_rtx) |
538 | return XEXP (src, 0); |
539 | else if (XEXP (src, 1) == pc_rtx) |
540 | { |
541 | rtx cond = XEXP (src, 0); |
542 | enum rtx_code revcode = reversed_comparison_code (cond, insn); |
543 | |
544 | if (revcode == UNKNOWN) |
545 | return 0; |
546 | |
547 | if (rev) |
548 | *rev = true; |
549 | return cond; |
550 | } |
551 | |
552 | return 0; |
553 | } |
554 | |
555 | /* Return the condition under which INSN does not execute (i.e. the |
556 | not-taken condition for a conditional branch), or NULL if we cannot |
557 | find such a condition. The caller should make a copy of the condition |
558 | before using it. */ |
559 | rtx |
560 | sched_get_reverse_condition_uncached (const rtx_insn *insn) |
561 | { |
562 | bool rev; |
563 | rtx cond = sched_get_condition_with_rev_uncached (insn, &rev); |
564 | if (cond == NULL_RTX) |
565 | return cond; |
566 | if (!rev) |
567 | { |
568 | enum rtx_code revcode = reversed_comparison_code (cond, insn); |
569 | cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond), |
570 | XEXP (cond, 0), |
571 | XEXP (cond, 1)); |
572 | } |
573 | return cond; |
574 | } |
575 | |
576 | /* Caching variant of sched_get_condition_with_rev_uncached. |
577 | We only do actual work the first time we come here for an insn; the |
578 | results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */ |
579 | static rtx |
580 | sched_get_condition_with_rev (const rtx_insn *insn, bool *rev) |
581 | { |
582 | bool tmp; |
583 | |
584 | if (INSN_LUID (insn) == 0) |
585 | return sched_get_condition_with_rev_uncached (insn, rev); |
586 | |
587 | if (INSN_CACHED_COND (insn) == const_true_rtx) |
588 | return NULL_RTX; |
589 | |
590 | if (INSN_CACHED_COND (insn) != NULL_RTX) |
591 | { |
592 | if (rev) |
593 | *rev = INSN_REVERSE_COND (insn); |
594 | return INSN_CACHED_COND (insn); |
595 | } |
596 | |
597 | INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp); |
598 | INSN_REVERSE_COND (insn) = tmp; |
599 | |
600 | if (INSN_CACHED_COND (insn) == NULL_RTX) |
601 | { |
602 | INSN_CACHED_COND (insn) = const_true_rtx; |
603 | return NULL_RTX; |
604 | } |
605 | |
606 | if (rev) |
607 | *rev = INSN_REVERSE_COND (insn); |
608 | return INSN_CACHED_COND (insn); |
609 | } |
610 | |
611 | /* True when we can find a condition under which INSN is executed. */ |
612 | static bool |
613 | sched_has_condition_p (const rtx_insn *insn) |
614 | { |
615 | return !! sched_get_condition_with_rev (insn, NULL); |
616 | } |
617 | |
618 | |
619 | |
620 | /* Return nonzero if conditions COND1 and COND2 can never be both true. */ |
621 | static int |
622 | conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2) |
623 | { |
624 | if (COMPARISON_P (cond1) |
625 | && COMPARISON_P (cond2) |
626 | && GET_CODE (cond1) == |
627 | (rev1==rev2 |
628 | ? reversed_comparison_code (cond2, NULL) |
629 | : GET_CODE (cond2)) |
630 | && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) |
631 | && XEXP (cond1, 1) == XEXP (cond2, 1)) |
632 | return 1; |
633 | return 0; |
634 | } |
635 | |
636 | /* Return true if insn1 and insn2 can never depend on one another because |
637 | the conditions under which they are executed are mutually exclusive. */ |
638 | bool |
639 | sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2) |
640 | { |
641 | rtx cond1, cond2; |
642 | bool rev1 = false, rev2 = false; |
643 | |
644 | /* df doesn't handle conditional lifetimes entirely correctly; |
645 | calls mess up the conditional lifetimes. */ |
646 | if (!CALL_P (insn1) && !CALL_P (insn2)) |
647 | { |
648 | cond1 = sched_get_condition_with_rev (insn1, &rev1); |
649 | cond2 = sched_get_condition_with_rev (insn2, &rev2); |
650 | if (cond1 && cond2 |
651 | && conditions_mutex_p (cond1, cond2, rev1, rev2) |
652 | /* Make sure first instruction doesn't affect condition of second |
653 | instruction if switched. */ |
654 | && !modified_in_p (cond1, insn2) |
655 | /* Make sure second instruction doesn't affect condition of first |
656 | instruction if switched. */ |
657 | && !modified_in_p (cond2, insn1)) |
658 | return true; |
659 | } |
660 | return false; |
661 | } |
662 | |
663 | |
664 | /* Return true if INSN can potentially be speculated with type DS. */ |
665 | bool |
666 | sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds) |
667 | { |
668 | if (HAS_INTERNAL_DEP (insn)) |
669 | return false; |
670 | |
671 | if (!NONJUMP_INSN_P (insn)) |
672 | return false; |
673 | |
674 | if (SCHED_GROUP_P (insn)) |
675 | return false; |
676 | |
677 | if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn))) |
678 | return false; |
679 | |
680 | if (side_effects_p (PATTERN (insn))) |
681 | return false; |
682 | |
683 | if (ds & BE_IN_SPEC) |
684 | /* The following instructions, which depend on a speculatively scheduled |
685 | instruction, cannot be speculatively scheduled along. */ |
686 | { |
687 | if (may_trap_or_fault_p (PATTERN (insn))) |
688 | /* If instruction might fault, it cannot be speculatively scheduled. |
689 | For control speculation it's obvious why and for data speculation |
690 | it's because the insn might get wrong input if speculation |
691 | wasn't successful. */ |
692 | return false; |
693 | |
694 | if ((ds & BE_IN_DATA) |
695 | && sched_has_condition_p (insn)) |
696 | /* If this is a predicated instruction, then it cannot be |
697 | speculatively scheduled. See PR35659. */ |
698 | return false; |
699 | } |
700 | |
701 | return true; |
702 | } |
703 | |
704 | /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR, |
705 | initialize RESOLVED_P_PTR with true if that list consists of resolved deps, |
706 | and remove the type of returned [through LIST_PTR] list from TYPES_PTR. |
707 | This function is used to switch sd_iterator to the next list. |
708 | !!! For internal use only. Might consider moving it to sched-int.h. */ |
709 | void |
710 | sd_next_list (const_rtx insn, sd_list_types_def *types_ptr, |
711 | deps_list_t *list_ptr, bool *resolved_p_ptr) |
712 | { |
713 | sd_list_types_def types = *types_ptr; |
714 | |
715 | if (types & SD_LIST_HARD_BACK) |
716 | { |
717 | *list_ptr = INSN_HARD_BACK_DEPS (insn); |
718 | *resolved_p_ptr = false; |
719 | *types_ptr = types & ~SD_LIST_HARD_BACK; |
720 | } |
721 | else if (types & SD_LIST_SPEC_BACK) |
722 | { |
723 | *list_ptr = INSN_SPEC_BACK_DEPS (insn); |
724 | *resolved_p_ptr = false; |
725 | *types_ptr = types & ~SD_LIST_SPEC_BACK; |
726 | } |
727 | else if (types & SD_LIST_FORW) |
728 | { |
729 | *list_ptr = INSN_FORW_DEPS (insn); |
730 | *resolved_p_ptr = false; |
731 | *types_ptr = types & ~SD_LIST_FORW; |
732 | } |
733 | else if (types & SD_LIST_RES_BACK) |
734 | { |
735 | *list_ptr = INSN_RESOLVED_BACK_DEPS (insn); |
736 | *resolved_p_ptr = true; |
737 | *types_ptr = types & ~SD_LIST_RES_BACK; |
738 | } |
739 | else if (types & SD_LIST_RES_FORW) |
740 | { |
741 | *list_ptr = INSN_RESOLVED_FORW_DEPS (insn); |
742 | *resolved_p_ptr = true; |
743 | *types_ptr = types & ~SD_LIST_RES_FORW; |
744 | } |
745 | else |
746 | { |
747 | *list_ptr = NULL; |
748 | *resolved_p_ptr = false; |
749 | *types_ptr = SD_LIST_NONE; |
750 | } |
751 | } |
752 | |
753 | /* Return the summary size of INSN's lists defined by LIST_TYPES. */ |
754 | int |
755 | sd_lists_size (const_rtx insn, sd_list_types_def list_types) |
756 | { |
757 | int size = 0; |
758 | |
759 | while (list_types != SD_LIST_NONE) |
760 | { |
761 | deps_list_t list; |
762 | bool resolved_p; |
763 | |
764 | sd_next_list (insn, &list_types, &list, &resolved_p); |
765 | if (list) |
766 | size += DEPS_LIST_N_LINKS (list); |
767 | } |
768 | |
769 | return size; |
770 | } |
771 | |
772 | /* Return true if INSN's lists defined by LIST_TYPES are all empty. */ |
773 | |
774 | bool |
775 | sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types) |
776 | { |
777 | while (list_types != SD_LIST_NONE) |
778 | { |
779 | deps_list_t list; |
780 | bool resolved_p; |
781 | |
782 | sd_next_list (insn, &list_types, &list, &resolved_p); |
783 | if (!deps_list_empty_p (list)) |
784 | return false; |
785 | } |
786 | |
787 | return true; |
788 | } |
789 | |
790 | /* Initialize data for INSN. */ |
791 | void |
792 | sd_init_insn (rtx_insn *insn) |
793 | { |
794 | INSN_HARD_BACK_DEPS (insn) = create_deps_list (); |
795 | INSN_SPEC_BACK_DEPS (insn) = create_deps_list (); |
796 | INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (); |
797 | INSN_FORW_DEPS (insn) = create_deps_list (); |
798 | INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list (); |
799 | |
800 | /* ??? It would be nice to allocate dependency caches here. */ |
801 | } |
802 | |
803 | /* Free data for INSN. */ |
804 | void |
805 | sd_finish_insn (rtx_insn *insn) |
806 | { |
807 | /* ??? It would be nice to deallocate dependency caches here. */ |
808 | |
809 | free_deps_list (INSN_HARD_BACK_DEPS (insn)); |
810 | INSN_HARD_BACK_DEPS (insn) = NULL; |
811 | |
812 | free_deps_list (INSN_SPEC_BACK_DEPS (insn)); |
813 | INSN_SPEC_BACK_DEPS (insn) = NULL; |
814 | |
815 | free_deps_list (INSN_RESOLVED_BACK_DEPS (insn)); |
816 | INSN_RESOLVED_BACK_DEPS (insn) = NULL; |
817 | |
818 | free_deps_list (INSN_FORW_DEPS (insn)); |
819 | INSN_FORW_DEPS (insn) = NULL; |
820 | |
821 | free_deps_list (INSN_RESOLVED_FORW_DEPS (insn)); |
822 | INSN_RESOLVED_FORW_DEPS (insn) = NULL; |
823 | } |
824 | |
825 | /* Find a dependency between producer PRO and consumer CON. |
826 | Search through resolved dependency lists if RESOLVED_P is true. |
827 | If no such dependency is found return NULL, |
828 | otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull] |
829 | with an iterator pointing to it. */ |
830 | static dep_t |
831 | sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p, |
832 | sd_iterator_def *sd_it_ptr) |
833 | { |
834 | sd_list_types_def pro_list_type; |
835 | sd_list_types_def con_list_type; |
836 | sd_iterator_def sd_it; |
837 | dep_t dep; |
838 | bool found_p = false; |
839 | |
840 | if (resolved_p) |
841 | { |
842 | pro_list_type = SD_LIST_RES_FORW; |
843 | con_list_type = SD_LIST_RES_BACK; |
844 | } |
845 | else |
846 | { |
847 | pro_list_type = SD_LIST_FORW; |
848 | con_list_type = SD_LIST_BACK; |
849 | } |
850 | |
851 | /* Walk through either back list of INSN or forw list of ELEM |
852 | depending on which one is shorter. */ |
853 | if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type)) |
854 | { |
855 | /* Find the dep_link with producer PRO in consumer's back_deps. */ |
856 | FOR_EACH_DEP (con, con_list_type, sd_it, dep) |
857 | if (DEP_PRO (dep) == pro) |
858 | { |
859 | found_p = true; |
860 | break; |
861 | } |
862 | } |
863 | else |
864 | { |
865 | /* Find the dep_link with consumer CON in producer's forw_deps. */ |
866 | FOR_EACH_DEP (pro, pro_list_type, sd_it, dep) |
867 | if (DEP_CON (dep) == con) |
868 | { |
869 | found_p = true; |
870 | break; |
871 | } |
872 | } |
873 | |
874 | if (found_p) |
875 | { |
876 | if (sd_it_ptr != NULL) |
877 | *sd_it_ptr = sd_it; |
878 | |
879 | return dep; |
880 | } |
881 | |
882 | return NULL; |
883 | } |
884 | |
885 | /* Find a dependency between producer PRO and consumer CON. |
886 | Use dependency [if available] to check if dependency is present at all. |
887 | Search through resolved dependency lists if RESOLVED_P is true. |
888 | Return the dependency, or NULL if none is found. */ |
889 | dep_t |
890 | sd_find_dep_between (rtx pro, rtx con, bool resolved_p) |
891 | { |
892 | if (true_dependency_cache != NULL) |
893 | /* Avoiding the list walk below can cut compile times dramatically |
894 | for some code. */ |
895 | { |
896 | int elem_luid = INSN_LUID (pro); |
897 | int insn_luid = INSN_LUID (con); |
898 | |
899 | if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid) |
900 | && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid) |
901 | && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid) |
902 | && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) |
903 | return NULL; |
904 | } |
905 | |
906 | return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL); |
907 | } |
908 | |
909 | /* Add or update a dependence described by DEP. |
910 | MEM1 and MEM2, if non-null, correspond to memory locations in case of |
911 | data speculation. |
912 | |
913 | The function returns a value indicating if an old entry has been changed |
914 | or a new entry has been added to insn's backward deps. |
915 | |
916 | This function merely checks if producer and consumer are the same insn |
917 | and doesn't create a dep in this case. Actual manipulation of |
918 | dependence data structures is performed in add_or_update_dep_1. */ |
919 | static enum DEPS_ADJUST_RESULT |
920 | maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2) |
921 | { |
922 | rtx_insn *elem = DEP_PRO (dep); |
923 | rtx_insn *insn = DEP_CON (dep); |
924 | |
925 | gcc_assert (INSN_P (insn) && INSN_P (elem)); |
926 | |
927 | /* Don't depend an insn on itself. */ |
928 | if (insn == elem) |
929 | { |
930 | if (sched_deps_info->generate_spec_deps) |
931 | /* INSN has an internal dependence, which we can't overcome. */ |
932 | HAS_INTERNAL_DEP (insn) = 1; |
933 | |
934 | return DEP_NODEP; |
935 | } |
936 | |
937 | return add_or_update_dep_1 (dep, resolved_p, mem1, mem2); |
938 | } |
939 | |
940 | /* Ask dependency caches what needs to be done for dependence DEP. |
941 | Return DEP_CREATED if new dependence should be created and there is no |
942 | need to try to find one searching the dependencies lists. |
943 | Return DEP_PRESENT if there already is a dependence described by DEP and |
944 | hence nothing is to be done. |
945 | Return DEP_CHANGED if there already is a dependence, but it should be |
946 | updated to incorporate additional information from DEP. */ |
947 | static enum DEPS_ADJUST_RESULT |
948 | ask_dependency_caches (dep_t dep) |
949 | { |
950 | int elem_luid = INSN_LUID (DEP_PRO (dep)); |
951 | int insn_luid = INSN_LUID (DEP_CON (dep)); |
952 | |
953 | gcc_assert (true_dependency_cache != NULL |
954 | && output_dependency_cache != NULL |
955 | && anti_dependency_cache != NULL |
956 | && control_dependency_cache != NULL); |
957 | |
958 | if (!(current_sched_info->flags & USE_DEPS_LIST)) |
959 | { |
960 | enum reg_note present_dep_type; |
961 | |
962 | if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)) |
963 | present_dep_type = REG_DEP_TRUE; |
964 | else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)) |
965 | present_dep_type = REG_DEP_OUTPUT; |
966 | else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)) |
967 | present_dep_type = REG_DEP_ANTI; |
968 | else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) |
969 | present_dep_type = REG_DEP_CONTROL; |
970 | else |
971 | /* There is no existing dep so it should be created. */ |
972 | return DEP_CREATED; |
973 | |
974 | if ((int) DEP_TYPE (dep) >= (int) present_dep_type) |
975 | /* DEP does not add anything to the existing dependence. */ |
976 | return DEP_PRESENT; |
977 | } |
978 | else |
979 | { |
980 | ds_t present_dep_types = 0; |
981 | |
982 | if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)) |
983 | present_dep_types |= DEP_TRUE; |
984 | if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)) |
985 | present_dep_types |= DEP_OUTPUT; |
986 | if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)) |
987 | present_dep_types |= DEP_ANTI; |
988 | if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) |
989 | present_dep_types |= DEP_CONTROL; |
990 | |
991 | if (present_dep_types == 0) |
992 | /* There is no existing dep so it should be created. */ |
993 | return DEP_CREATED; |
994 | |
995 | if (!(current_sched_info->flags & DO_SPECULATION) |
996 | || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid)) |
997 | { |
998 | if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES)) |
999 | == present_dep_types) |
1000 | /* DEP does not add anything to the existing dependence. */ |
1001 | return DEP_PRESENT; |
1002 | } |
1003 | else |
1004 | { |
1005 | /* Only true dependencies can be data speculative and |
1006 | only anti dependencies can be control speculative. */ |
1007 | gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI)) |
1008 | == present_dep_types); |
1009 | |
1010 | /* if (DEP is SPECULATIVE) then |
1011 | ..we should update DEP_STATUS |
1012 | else |
1013 | ..we should reset existing dep to non-speculative. */ |
1014 | } |
1015 | } |
1016 | |
1017 | return DEP_CHANGED; |
1018 | } |
1019 | |
1020 | /* Set dependency caches according to DEP. */ |
1021 | static void |
1022 | set_dependency_caches (dep_t dep) |
1023 | { |
1024 | int elem_luid = INSN_LUID (DEP_PRO (dep)); |
1025 | int insn_luid = INSN_LUID (DEP_CON (dep)); |
1026 | |
1027 | if (!(current_sched_info->flags & USE_DEPS_LIST)) |
1028 | { |
1029 | switch (DEP_TYPE (dep)) |
1030 | { |
1031 | case REG_DEP_TRUE: |
1032 | bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid); |
1033 | break; |
1034 | |
1035 | case REG_DEP_OUTPUT: |
1036 | bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid); |
1037 | break; |
1038 | |
1039 | case REG_DEP_ANTI: |
1040 | bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid); |
1041 | break; |
1042 | |
1043 | case REG_DEP_CONTROL: |
1044 | bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid); |
1045 | break; |
1046 | |
1047 | default: |
1048 | gcc_unreachable (); |
1049 | } |
1050 | } |
1051 | else |
1052 | { |
1053 | ds_t ds = DEP_STATUS (dep); |
1054 | |
1055 | if (ds & DEP_TRUE) |
1056 | bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid); |
1057 | if (ds & DEP_OUTPUT) |
1058 | bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid); |
1059 | if (ds & DEP_ANTI) |
1060 | bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid); |
1061 | if (ds & DEP_CONTROL) |
1062 | bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid); |
1063 | |
1064 | if (ds & SPECULATIVE) |
1065 | { |
1066 | gcc_assert (current_sched_info->flags & DO_SPECULATION); |
1067 | bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid); |
1068 | } |
1069 | } |
1070 | } |
1071 | |
1072 | /* Type of dependence DEP has changed from OLD_TYPE. Update dependency |
1073 | caches accordingly. */ |
1074 | static void |
1075 | update_dependency_caches (dep_t dep, enum reg_note old_type) |
1076 | { |
1077 | int elem_luid = INSN_LUID (DEP_PRO (dep)); |
1078 | int insn_luid = INSN_LUID (DEP_CON (dep)); |
1079 | |
1080 | /* Clear corresponding cache entry because type of the link |
1081 | may have changed. Keep them if we use_deps_list. */ |
1082 | if (!(current_sched_info->flags & USE_DEPS_LIST)) |
1083 | { |
1084 | switch (old_type) |
1085 | { |
1086 | case REG_DEP_OUTPUT: |
1087 | bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid); |
1088 | break; |
1089 | |
1090 | case REG_DEP_ANTI: |
1091 | bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid); |
1092 | break; |
1093 | |
1094 | case REG_DEP_CONTROL: |
1095 | bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid); |
1096 | break; |
1097 | |
1098 | default: |
1099 | gcc_unreachable (); |
1100 | } |
1101 | } |
1102 | |
1103 | set_dependency_caches (dep); |
1104 | } |
1105 | |
1106 | /* Convert a dependence pointed to by SD_IT to be non-speculative. */ |
1107 | static void |
1108 | change_spec_dep_to_hard (sd_iterator_def sd_it) |
1109 | { |
1110 | dep_node_t node = DEP_LINK_NODE (*sd_it.linkp); |
1111 | dep_link_t link = DEP_NODE_BACK (node); |
1112 | dep_t dep = DEP_NODE_DEP (node); |
1113 | rtx_insn *elem = DEP_PRO (dep); |
1114 | rtx_insn *insn = DEP_CON (dep); |
1115 | |
1116 | move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn)); |
1117 | |
1118 | DEP_STATUS (dep) &= ~SPECULATIVE; |
1119 | |
1120 | if (true_dependency_cache != NULL) |
1121 | /* Clear the cache entry. */ |
1122 | bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)], |
1123 | INSN_LUID (elem)); |
1124 | } |
1125 | |
1126 | /* Update DEP to incorporate information from NEW_DEP. |
1127 | SD_IT points to DEP in case it should be moved to another list. |
1128 | MEM1 and MEM2, if nonnull, correspond to memory locations in case if |
1129 | data-speculative dependence should be updated. */ |
1130 | static enum DEPS_ADJUST_RESULT |
1131 | update_dep (dep_t dep, dep_t new_dep, |
1132 | sd_iterator_def sd_it ATTRIBUTE_UNUSED, |
1133 | rtx mem1 ATTRIBUTE_UNUSED, |
1134 | rtx mem2 ATTRIBUTE_UNUSED) |
1135 | { |
1136 | enum DEPS_ADJUST_RESULT res = DEP_PRESENT; |
1137 | enum reg_note old_type = DEP_TYPE (dep); |
1138 | bool was_spec = dep_spec_p (dep); |
1139 | |
1140 | DEP_NONREG (dep) |= DEP_NONREG (new_dep); |
1141 | DEP_MULTIPLE (dep) = 1; |
1142 | |
1143 | /* If this is a more restrictive type of dependence than the |
1144 | existing one, then change the existing dependence to this |
1145 | type. */ |
1146 | if ((int) DEP_TYPE (new_dep) < (int) old_type) |
1147 | { |
1148 | DEP_TYPE (dep) = DEP_TYPE (new_dep); |
1149 | res = DEP_CHANGED; |
1150 | } |
1151 | |
1152 | if (current_sched_info->flags & USE_DEPS_LIST) |
1153 | /* Update DEP_STATUS. */ |
1154 | { |
1155 | ds_t dep_status = DEP_STATUS (dep); |
1156 | ds_t ds = DEP_STATUS (new_dep); |
1157 | ds_t new_status = ds | dep_status; |
1158 | |
1159 | if (new_status & SPECULATIVE) |
1160 | { |
1161 | /* Either existing dep or a dep we're adding or both are |
1162 | speculative. */ |
1163 | if (!(ds & SPECULATIVE) |
1164 | || !(dep_status & SPECULATIVE)) |
1165 | /* The new dep can't be speculative. */ |
1166 | new_status &= ~SPECULATIVE; |
1167 | else |
1168 | { |
1169 | /* Both are speculative. Merge probabilities. */ |
1170 | if (mem1 != NULL) |
1171 | { |
1172 | dw_t dw; |
1173 | |
1174 | dw = estimate_dep_weak (mem1, mem2); |
1175 | ds = set_dep_weak (ds, BEGIN_DATA, dw); |
1176 | } |
1177 | |
1178 | new_status = ds_merge (dep_status, ds); |
1179 | } |
1180 | } |
1181 | |
1182 | ds = new_status; |
1183 | |
1184 | if (dep_status != ds) |
1185 | { |
1186 | DEP_STATUS (dep) = ds; |
1187 | res = DEP_CHANGED; |
1188 | } |
1189 | } |
1190 | |
1191 | if (was_spec && !dep_spec_p (dep)) |
1192 | /* The old dep was speculative, but now it isn't. */ |
1193 | change_spec_dep_to_hard (sd_it); |
1194 | |
1195 | if (true_dependency_cache != NULL |
1196 | && res == DEP_CHANGED) |
1197 | update_dependency_caches (dep, old_type); |
1198 | |
1199 | return res; |
1200 | } |
1201 | |
1202 | /* Add or update a dependence described by DEP. |
1203 | MEM1 and MEM2, if non-null, correspond to memory locations in case of |
1204 | data speculation. |
1205 | |
1206 | The function returns a value indicating if an old entry has been changed |
1207 | or a new entry has been added to insn's backward deps or nothing has |
1208 | been updated at all. */ |
1209 | static enum DEPS_ADJUST_RESULT |
1210 | add_or_update_dep_1 (dep_t new_dep, bool resolved_p, |
1211 | rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED) |
1212 | { |
1213 | bool maybe_present_p = true; |
1214 | bool present_p = false; |
1215 | |
1216 | gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep)) |
1217 | && DEP_PRO (new_dep) != DEP_CON (new_dep)); |
1218 | |
1219 | if (flag_checking) |
1220 | check_dep (new_dep, mem1 != NULL); |
1221 | |
1222 | if (true_dependency_cache != NULL) |
1223 | { |
1224 | switch (ask_dependency_caches (new_dep)) |
1225 | { |
1226 | case DEP_PRESENT: |
1227 | dep_t present_dep; |
1228 | sd_iterator_def sd_it; |
1229 | |
1230 | present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep), |
1231 | DEP_CON (new_dep), |
1232 | resolved_p, &sd_it); |
1233 | DEP_MULTIPLE (present_dep) = 1; |
1234 | return DEP_PRESENT; |
1235 | |
1236 | case DEP_CHANGED: |
1237 | maybe_present_p = true; |
1238 | present_p = true; |
1239 | break; |
1240 | |
1241 | case DEP_CREATED: |
1242 | maybe_present_p = false; |
1243 | present_p = false; |
1244 | break; |
1245 | |
1246 | default: |
1247 | gcc_unreachable (); |
1248 | break; |
1249 | } |
1250 | } |
1251 | |
1252 | /* Check that we don't already have this dependence. */ |
1253 | if (maybe_present_p) |
1254 | { |
1255 | dep_t present_dep; |
1256 | sd_iterator_def sd_it; |
1257 | |
1258 | gcc_assert (true_dependency_cache == NULL || present_p); |
1259 | |
1260 | present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep), |
1261 | DEP_CON (new_dep), |
1262 | resolved_p, &sd_it); |
1263 | |
1264 | if (present_dep != NULL) |
1265 | /* We found an existing dependency between ELEM and INSN. */ |
1266 | return update_dep (present_dep, new_dep, sd_it, mem1, mem2); |
1267 | else |
1268 | /* We didn't find a dep, so it shouldn't be present in the cache. */ |
1269 | gcc_assert (!present_p); |
1270 | } |
1271 | |
1272 | /* Might want to check one level of transitivity to save conses. |
1273 | This check should be done in maybe_add_or_update_dep_1. |
1274 | Since we made it to add_or_update_dep_1, we must create |
1275 | (or update) a link. */ |
1276 | |
1277 | if (mem1 != NULL_RTX)
1278 | {
1279 | gcc_assert (sched_deps_info->generate_spec_deps);
1280 | DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1281 | estimate_dep_weak (mem1, mem2));
1282 | } |
1283 | |
1284 | sd_add_dep (new_dep, resolved_p); |
1285 | |
1286 | return DEP_CREATED; |
1287 | } |
1288 | |
1289 | /* Initialize BACK_LIST_PTR with consumer's backward list and |
1290 | FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true |
1291 | initialize with lists that hold resolved deps. */ |
1292 | static void |
1293 | get_back_and_forw_lists (dep_t dep, bool resolved_p, |
1294 | deps_list_t *back_list_ptr, |
1295 | deps_list_t *forw_list_ptr) |
1296 | { |
1297 | rtx_insn *con = DEP_CON (dep);
1298 |
1299 | if (!resolved_p)
1300 | {
1301 | if (dep_spec_p (dep))
1302 | *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1303 | else
1304 | *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1305 |
1306 | *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1307 | }
1308 | else
1309 | {
1310 | *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1311 | *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1312 | } |
1313 | } |
1314 | |
1315 | /* Add dependence described by DEP. |
1316 | If RESOLVED_P is true treat the dependence as a resolved one. */ |
1317 | void |
1318 | sd_add_dep (dep_t dep, bool resolved_p) |
1319 | { |
1320 | dep_node_t n = create_dep_node (); |
1321 | deps_list_t con_back_deps; |
1322 | deps_list_t pro_forw_deps; |
1323 | rtx_insn *elem = DEP_PRO (dep);
1324 | rtx_insn *insn = DEP_CON (dep);
1325 |
1326 | gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1327 |
1328 | if ((current_sched_info->flags & DO_SPECULATION) == 0
1329 | || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1330 | DEP_STATUS (dep) &= ~SPECULATIVE;
1331 |
1332 | copy_dep (DEP_NODE_DEP (n), dep);
1333 |
1334 | get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1335 |
1336 | add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1337 |
1338 | if (flag_checking)
1339 | check_dep (dep, false);
1340 |
1341 | add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1342 |
1343 | /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1344 | in the bitmap caches of dependency information.  */
1345 | if (true_dependency_cache != NULL)
1346 | set_dependency_caches (dep); |
1347 | } |
1348 | |
1349 | /* Add or update a dependence described by DEP.  If RESOLVED_P is true,
1350 | treat it as a resolved one.  This function is a convenience wrapper
1351 | around add_or_update_dep_1.  */
1352 | enum DEPS_ADJUST_RESULT
1353 | sd_add_or_update_dep (dep_t dep, bool resolved_p)
1354 | {
1355 | return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1356 | } |
1357 | |
1358 | /* Resolve the dependence pointed to by SD_IT.
1359 | SD_IT will advance to the next element. */ |
1360 | void |
1361 | sd_resolve_dep (sd_iterator_def sd_it) |
1362 | { |
1363 | dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1364 | dep_t dep = DEP_NODE_DEP (node);
1365 | rtx_insn *pro = DEP_PRO (dep);
1366 | rtx_insn *con = DEP_CON (dep);
1367 |
1368 | if (dep_spec_p (dep))
1369 | move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1370 | INSN_RESOLVED_BACK_DEPS (con));
1371 | else
1372 | move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1373 | INSN_RESOLVED_BACK_DEPS (con));
1374 |
1375 | move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1376 | INSN_RESOLVED_FORW_DEPS (pro));
1377 | } |
1378 | |
1379 | /* Perform the inverse operation of sd_resolve_dep. Restore the dependence |
1380 | pointed to by SD_IT to unresolved state. */ |
1381 | void |
1382 | sd_unresolve_dep (sd_iterator_def sd_it) |
1383 | { |
1384 | dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1385 | dep_t dep = DEP_NODE_DEP (node);
1386 | rtx_insn *pro = DEP_PRO (dep);
1387 | rtx_insn *con = DEP_CON (dep);
1388 |
1389 | if (dep_spec_p (dep))
1390 | move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1391 | INSN_SPEC_BACK_DEPS (con));
1392 | else
1393 | move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1394 | INSN_HARD_BACK_DEPS (con));
1395 |
1396 | move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1397 | INSN_FORW_DEPS (pro));
1398 | } |
1399 | |
1400 | /* Make TO depend on all the FROM's producers. |
1401 | If RESOLVED_P is true add dependencies to the resolved lists. */ |
1402 | void |
1403 | sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p) |
1404 | { |
1405 | sd_list_types_def list_type; |
1406 | sd_iterator_def sd_it; |
1407 | dep_t dep; |
1408 | |
1409 | list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1410 |
1411 | FOR_EACH_DEP (from, list_type, sd_it, dep)
1412 | {
1413 | dep_def _new_dep, *new_dep = &_new_dep;
1414 |
1415 | copy_dep (new_dep, dep);
1416 | DEP_CON (new_dep) = to;
1417 | sd_add_dep (new_dep, resolved_p); |
1418 | } |
1419 | } |
1420 | |
1421 | /* Remove a dependency referred to by SD_IT. |
1422 | SD_IT will point to the next dependence after removal. */ |
1423 | void |
1424 | sd_delete_dep (sd_iterator_def sd_it) |
1425 | { |
1426 | dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1427 | dep_t dep = DEP_NODE_DEP (n);
1428 | rtx_insn *pro = DEP_PRO (dep);
1429 | rtx_insn *con = DEP_CON (dep);
1430 | deps_list_t con_back_deps;
1431 | deps_list_t pro_forw_deps;
1432 |
1433 | if (true_dependency_cache != NULL)
1434 | {
1435 | int elem_luid = INSN_LUID (pro);
1436 | int insn_luid = INSN_LUID (con);
1437 |
1438 | bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1439 | bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1440 | bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1441 | bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1442 | |
1443 | if (current_sched_info->flags & DO_SPECULATION) |
1444 | bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid); |
1445 | } |
1446 | |
1447 | get_back_and_forw_lists (dep, sd_it.resolved_p, |
1448 | &con_back_deps, &pro_forw_deps); |
1449 | |
1450 | remove_from_deps_list (DEP_NODE_BACK (n)(&(n)->back), con_back_deps); |
1451 | remove_from_deps_list (DEP_NODE_FORW (n)(&(n)->forw), pro_forw_deps); |
1452 | |
1453 | delete_dep_node (n); |
1454 | } |
1455 | |
1456 | /* Dump size of the lists. */ |
1457 | #define DUMP_LISTS_SIZE (2)
1458 |
1459 | /* Dump dependencies of the lists.  */
1460 | #define DUMP_LISTS_DEPS (4)
1461 |
1462 | /* Dump all information about the lists.  */
1463 | #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1464 | |
1465 | /* Dump deps_lists of INSN specified by TYPES to DUMP. |
1466 | FLAGS is a bit mask specifying what information about the lists needs |
1467 | to be printed. |
1468 | If FLAGS has the very first bit set, then dump all information about |
1469 | the lists and propagate this bit into the callee dump functions. */ |
1470 | static void |
1471 | dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags) |
1472 | { |
1473 | sd_iterator_def sd_it; |
1474 | dep_t dep; |
1475 | int all; |
1476 | |
1477 | all = (flags & 1); |
1478 | |
1479 | if (all) |
1480 | flags |= DUMP_LISTS_ALL;
1481 |
1482 | fprintf (dump, "[");
1483 |
1484 | if (flags & DUMP_LISTS_SIZE)
1485 | fprintf (dump, "%d; ", sd_lists_size (insn, types));
1486 |
1487 | if (flags & DUMP_LISTS_DEPS)
1488 | {
1489 | FOR_EACH_DEP (insn, types, sd_it, dep)
1490 | { |
1491 | dump_dep (dump, dep, dump_dep_flags | all); |
1492 | fprintf (dump, " "); |
1493 | } |
1494 | } |
1495 | } |
1496 | |
1497 | /* Dump all information about deps_lists of INSN specified by TYPES |
1498 | to STDERR. */ |
1499 | void |
1500 | sd_debug_lists (rtx insn, sd_list_types_def types) |
1501 | { |
1502 | dump_lists (stderr, insn, types, 1);
1503 | fprintf (stderr, "\n");
1504 | } |
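sd_debug_lists is the variant intended for interactive use.  A hypothetical fragment (an assumption for illustration only, not code from this file) showing how a scheduler hook that already has an insn and sched_verbose in scope could dump that insn's unresolved backward dependencies:

    /* Hypothetical debugging aid: dump INSN's backward deps to stderr.  */
    if (sched_verbose >= 5)
      sd_debug_lists (insn, SD_LIST_BACK);

The same call can also be issued by hand from a debugger while stopped inside the dependency analyzer.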
1505 | |
1506 | /* A wrapper around add_dependence_1, to add a dependence of CON on |
1507 | PRO, with type DEP_TYPE. This function implements special handling |
1508 | for REG_DEP_CONTROL dependencies. For these, we optionally promote |
1509 | the type to REG_DEP_ANTI if we can determine that predication is |
1510 | impossible; otherwise we add additional true dependencies on the |
1511 | INSN_COND_DEPS list of the jump (which PRO must be). */ |
1512 | void |
1513 | add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type) |
1514 | { |
1515 | if (dep_type == REG_DEP_CONTROL |
1516 | && !(current_sched_info->flags & DO_PREDICATION)) |
1517 | dep_type = REG_DEP_ANTI; |
1518 | |
1519 | /* A REG_DEP_CONTROL dependence may be eliminated through predication, |
1520 | so we must also make the insn dependent on the setter of the |
1521 | condition. */ |
1522 | if (dep_type == REG_DEP_CONTROL) |
1523 | { |
1524 | rtx_insn *real_pro = pro; |
1525 | rtx_insn *other = real_insn_for_shadow (real_pro); |
1526 | rtx cond; |
1527 | |
1528 | if (other != NULL_RTX)
1529 | real_pro = other; |
1530 | cond = sched_get_reverse_condition_uncached (real_pro); |
1531 | /* Verify that the insn does not use a different value in |
1532 | the condition register than the one that was present at |
1533 | the jump. */ |
1534 | if (cond == NULL_RTX)
1535 | dep_type = REG_DEP_ANTI; |
1536 | else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1537 | { |
1538 | HARD_REG_SET uses; |
1539 | CLEAR_HARD_REG_SET (uses); |
1540 | note_uses (&PATTERN (con), record_hard_reg_uses, &uses); |
1541 | if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1542 | dep_type = REG_DEP_ANTI; |
1543 | } |
1544 | if (dep_type == REG_DEP_CONTROL) |
1545 | { |
1546 | if (sched_verbose >= 5) |
1547 | fprintf (sched_dump, "making DEP_CONTROL for %d\n", |
1548 | INSN_UID (real_pro)); |
1549 | add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1550 | REG_DEP_TRUE, false); |
1551 | } |
1552 | } |
1553 | |
1554 | add_dependence_1 (con, pro, dep_type); |
1555 | } |
1556 | |
1557 | /* A convenience wrapper to operate on an entire list. HARD should be |
1558 | true if DEP_NONREG should be set on newly created dependencies. */ |
1559 | |
1560 | static void |
1561 | add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond, |
1562 | enum reg_note dep_type, bool hard) |
1563 | { |
1564 | mark_as_hard = hard; |
1565 | for (; list; list = list->next ()) |
1566 | { |
1567 | if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ())) |
1568 | add_dependence (insn, list->insn (), dep_type); |
1569 | } |
1570 | mark_as_hard = false; |
1571 | } |
1572 | |
1573 | /* Similar, but free *LISTP at the same time, when the context |
1574 | is not readonly. HARD should be true if DEP_NONREG should be set on |
1575 | newly created dependencies. */ |
1576 | |
1577 | static void |
1578 | add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn, |
1579 | rtx_insn_list **listp, |
1580 | int uncond, enum reg_note dep_type, bool hard) |
1581 | { |
1582 | add_dependence_list (insn, *listp, uncond, dep_type, hard); |
1583 | |
1584 | /* We don't want to short-circuit dependencies involving debug |
1585 | insns, because they may cause actual dependencies to be |
1586 | disregarded. */ |
1587 | if (deps->readonly || DEBUG_INSN_P (insn))
1588 | return; |
1589 | |
1590 | free_INSN_LIST_list (listp); |
1591 | } |
1592 | |
1593 | /* Remove all occurrences of INSN from LIST. Return the number of |
1594 | occurrences removed. */ |
1595 | |
1596 | static int |
1597 | remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp) |
1598 | { |
1599 | int removed = 0; |
1600 | |
1601 | while (*listp) |
1602 | { |
1603 | if ((*listp)->insn () == insn) |
1604 | { |
1605 | remove_free_INSN_LIST_node (listp); |
1606 | removed++; |
1607 | continue; |
1608 | } |
1609 | |
1610 | listp = (rtx_insn_list **)&XEXP (*listp, 1)(((*listp)->u.fld[1]).rt_rtx); |
1611 | } |
1612 | |
1613 | return removed; |
1614 | } |
1615 | |
1616 | /* Same as above, but process two lists at once. */ |
1617 | static int |
1618 | remove_from_both_dependence_lists (rtx_insn *insn, |
1619 | rtx_insn_list **listp, |
1620 | rtx_expr_list **exprp) |
1621 | { |
1622 | int removed = 0; |
1623 | |
1624 | while (*listp) |
1625 | { |
1626 | if (XEXP (*listp, 0)(((*listp)->u.fld[0]).rt_rtx) == insn) |
1627 | { |
1628 | remove_free_INSN_LIST_node (listp); |
1629 | remove_free_EXPR_LIST_node (exprp); |
1630 | removed++; |
1631 | continue; |
1632 | } |
1633 | |
1634 | listp = (rtx_insn_list **)&XEXP (*listp, 1);
1635 | exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1636 | } |
1637 | |
1638 | return removed; |
1639 | } |
1640 | |
1641 | /* Clear all dependencies for an insn. */ |
1642 | static void |
1643 | delete_all_dependences (rtx_insn *insn) |
1644 | { |
1645 | sd_iterator_def sd_it; |
1646 | dep_t dep; |
1647 | |
1648 | /* The loop below could be optimized to clear the caches and back_deps
1649 | in one call, but that would duplicate code from
1650 | delete_dep ().  */
1651 | |
1652 | for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1653 | sd_iterator_cond (&sd_it, &dep);) |
1654 | sd_delete_dep (sd_it); |
1655 | } |
1656 | |
1657 | /* All insns in a scheduling group except the first should only have |
1658 | dependencies on the previous insn in the group. So we find the |
1659 | first instruction in the scheduling group by walking the dependence |
1660 | chains backwards. Then we add the dependencies for the group to |
1661 | the previous nonnote insn. */ |
1662 | |
1663 | static void |
1664 | chain_to_prev_insn (rtx_insn *insn) |
1665 | { |
1666 | sd_iterator_def sd_it; |
1667 | dep_t dep; |
1668 | rtx_insn *prev_nonnote; |
1669 | |
1670 | FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1671 | {
1672 | rtx_insn *i = insn;
1673 | rtx_insn *pro = DEP_PRO (dep);
1674 |
1675 | do
1676 | {
1677 | i = prev_nonnote_insn (i);
1678 |
1679 | if (pro == i)
1680 | goto next_link;
1681 | } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1682 |
1683 | if (! sched_insns_conditions_mutex_p (i, pro))
1684 | add_dependence (i, pro, DEP_TYPE (dep));
1685 | next_link:; |
1686 | } |
1687 | |
1688 | delete_all_dependences (insn); |
1689 | |
1690 | prev_nonnote = prev_nonnote_nondebug_insn (insn); |
1691 | if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote) |
1692 | && ! sched_insns_conditions_mutex_p (insn, prev_nonnote)) |
1693 | add_dependence (insn, prev_nonnote, REG_DEP_ANTI); |
1694 | } |
1695 | |
1696 | /* Process an insn's memory dependencies. There are four kinds of |
1697 | dependencies: |
1698 | |
1699 | (0) read dependence: read follows read |
1700 | (1) true dependence: read follows write |
1701 | (2) output dependence: write follows write |
1702 | (3) anti dependence: write follows read |
1703 | |
1704 | We are careful to build only dependencies which actually exist, and |
1705 | use transitivity to avoid building too many links. */ |
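To make the four kinds concrete, here is a minimal, self-contained C illustration (added for exposition only, not GCC code; the array and variable names are invented).  Each commented pair of statements exhibits one kind of dependence; pairs (1)-(3) must keep their order, while the read-after-read pair (0) imposes no ordering by itself.

    #include <stdio.h>

    int main (void)
    {
      int a[4] = { 0 };
      int r1, r2;

      r1 = a[0];   /* (0) read dependence: a read ...                   */
      r2 = a[0];   /*     ... followed by another read of a[0].         */

      a[1] = 5;    /* (1) true dependence: a write ...                  */
      r1 = a[1];   /*     ... followed by a read of the same location.  */

      a[2] = 6;    /* (2) output dependence: a write ...                */
      a[2] = 7;    /*     ... followed by another write to a[2].        */

      r2 = a[3];   /* (3) anti dependence: a read ...                   */
      a[3] = 8;    /*     ... followed by a write to the same location. */

      printf ("%d %d\n", r1, r2);
      return 0;
    }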
1706 | |
1707 | /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST. |
1708 | The MEM is a memory reference contained within INSN, which we are saving |
1709 | so that we can do memory aliasing on it. */ |
1710 | |
1711 | static void |
1712 | add_insn_mem_dependence (class deps_desc *deps, bool read_p, |
1713 | rtx_insn *insn, rtx mem) |
1714 | { |
1715 | rtx_insn_list **insn_list; |
1716 | rtx_insn_list *insn_node; |
1717 | rtx_expr_list **mem_list; |
1718 | rtx_expr_list *mem_node; |
1719 | |
1720 | gcc_assert (!deps->readonly);
1721 | if (read_p) |
1722 | { |
1723 | insn_list = &deps->pending_read_insns; |
1724 | mem_list = &deps->pending_read_mems; |
1725 | if (!DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN)) |
1726 | deps->pending_read_list_length++; |
1727 | } |
1728 | else |
1729 | { |
1730 | insn_list = &deps->pending_write_insns; |
1731 | mem_list = &deps->pending_write_mems; |
1732 | deps->pending_write_list_length++; |
1733 | } |
1734 | |
1735 | insn_node = alloc_INSN_LIST (insn, *insn_list); |
1736 | *insn_list = insn_node; |
1737 | |
1738 | if (sched_deps_info->use_cselib) |
1739 | { |
1740 | mem = shallow_copy_rtx (mem); |
1741 | XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1742 | GET_MODE (mem), insn);
1743 | }
1744 | mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1745 | *mem_list = mem_node; |
1746 | } |
1747 | |
1748 | /* Make a dependency between every memory reference on the pending lists |
1749 | and INSN, thus flushing the pending lists. FOR_READ is true if emitting |
1750 | dependencies for a read operation, similarly with FOR_WRITE. */ |
1751 | |
1752 | static void |
1753 | flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read, |
1754 | int for_write) |
1755 | { |
1756 | if (for_write) |
1757 | { |
1758 | add_dependence_list_and_free (deps, insn, &deps->pending_read_insns, |
1759 | 1, REG_DEP_ANTI, true); |
1760 | if (!deps->readonly) |
1761 | { |
1762 | free_EXPR_LIST_list (&deps->pending_read_mems); |
1763 | deps->pending_read_list_length = 0; |
1764 | } |
1765 | } |
1766 | |
1767 | add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1, |
1768 | for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT, |
1769 | true); |
1770 | |
1771 | add_dependence_list_and_free (deps, insn, |
1772 | &deps->last_pending_memory_flush, 1, |
1773 | for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT, |
1774 | true); |
1775 | |
1776 | add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1, |
1777 | REG_DEP_ANTI, true); |
1778 | |
1779 | if (DEBUG_INSN_P (insn))
1780 | { |
1781 | if (for_write) |
1782 | free_INSN_LIST_list (&deps->pending_read_insns); |
1783 | free_INSN_LIST_list (&deps->pending_write_insns); |
1784 | free_INSN_LIST_list (&deps->last_pending_memory_flush); |
1785 | free_INSN_LIST_list (&deps->pending_jump_insns); |
1786 | } |
1787 | |
1788 | if (!deps->readonly) |
1789 | { |
1790 | free_EXPR_LIST_list (&deps->pending_write_mems); |
1791 | deps->pending_write_list_length = 0; |
1792 | |
1793 | deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1794 | deps->pending_flush_length = 1; |
1795 | } |
1796 | mark_as_hard = false; |
1797 | } |
1798 | |
1799 | /* Instruction which dependencies we are analyzing. */ |
1800 | static rtx_insn *cur_insn = NULLnullptr; |
1801 | |
1802 | /* Implement hooks for haifa scheduler. */ |
1803 | |
1804 | static void |
1805 | haifa_start_insn (rtx_insn *insn) |
1806 | { |
1807 | gcc_assert (insn && !cur_insn);
1808 | |
1809 | cur_insn = insn; |
1810 | } |
1811 | |
1812 | static void |
1813 | haifa_finish_insn (void) |
1814 | { |
1815 | cur_insn = NULLnullptr; |
1816 | } |
1817 | |
1818 | void |
1819 | haifa_note_reg_set (int regno) |
1820 | { |
1821 | SET_REGNO_REG_SET (reg_pending_sets, regno);
1822 | } |
1823 | |
1824 | void |
1825 | haifa_note_reg_clobber (int regno) |
1826 | { |
1827 | SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1828 | } |
1829 | |
1830 | void |
1831 | haifa_note_reg_use (int regno) |
1832 | { |
1833 | SET_REGNO_REG_SET (reg_pending_uses, regno);
1834 | } |
1835 | |
1836 | static void |
1837 | haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds) |
1838 | { |
1839 | if (!(ds & SPECULATIVE))
1840 | {
1841 | mem = NULL_RTX;
1842 | pending_mem = NULL_RTX;
1843 | }
1844 | else
1845 | gcc_assert (ds & BEGIN_DATA);
1846 | |
1847 | { |
1848 | dep_def _dep, *dep = &_dep; |
1849 | |
1850 | init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds), |
1851 | current_sched_info->flags & USE_DEPS_LIST ? ds : 0); |
1852 | DEP_NONREG (dep) = 1;
1853 | maybe_add_or_update_dep_1 (dep, false, pending_mem, mem); |
1854 | } |
1855 | |
1856 | } |
1857 | |
1858 | static void |
1859 | haifa_note_dep (rtx_insn *elem, ds_t ds) |
1860 | { |
1861 | dep_def _dep; |
1862 | dep_t dep = &_dep; |
1863 | |
1864 | init_dep (dep, elem, cur_insn, ds_to_dt (ds)); |
1865 | if (mark_as_hard) |
1866 | DEP_NONREG (dep) = 1;
1867 | maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1868 | } |
1869 | |
1870 | static void |
1871 | note_reg_use (int r) |
1872 | { |
1873 | if (sched_deps_info->note_reg_use) |
1874 | sched_deps_info->note_reg_use (r); |
1875 | } |
1876 | |
1877 | static void |
1878 | note_reg_set (int r) |
1879 | { |
1880 | if (sched_deps_info->note_reg_set) |
1881 | sched_deps_info->note_reg_set (r); |
1882 | } |
1883 | |
1884 | static void |
1885 | note_reg_clobber (int r) |
1886 | { |
1887 | if (sched_deps_info->note_reg_clobber) |
1888 | sched_deps_info->note_reg_clobber (r); |
1889 | } |
1890 | |
1891 | static void |
1892 | note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds) |
1893 | { |
1894 | if (sched_deps_info->note_mem_dep) |
1895 | sched_deps_info->note_mem_dep (m1, m2, e, ds); |
1896 | } |
1897 | |
1898 | static void |
1899 | note_dep (rtx_insn *e, ds_t ds) |
1900 | { |
1901 | if (sched_deps_info->note_dep) |
1902 | sched_deps_info->note_dep (e, ds); |
1903 | } |
1904 | |
1905 | /* Return the reg_note corresponding to DS.  */
1906 | enum reg_note
1907 | ds_to_dt (ds_t ds)
1908 | {
1909 | if (ds & DEP_TRUE)
1910 | return REG_DEP_TRUE;
1911 | else if (ds & DEP_OUTPUT)
1912 | return REG_DEP_OUTPUT;
1913 | else if (ds & DEP_ANTI)
1914 | return REG_DEP_ANTI;
1915 | else
1916 | {
1917 | gcc_assert (ds & DEP_CONTROL);
1918 | return REG_DEP_CONTROL; |
1919 | } |
1920 | } |
1921 | |
1922 | |
1923 | |
1924 | /* Functions for computation of info needed for register pressure |
1925 | sensitive insn scheduling. */ |
1926 | |
1927 | |
1928 | /* Allocate and return reg_use_data structure for REGNO and INSN. */ |
1929 | static struct reg_use_data * |
1930 | create_insn_reg_use (int regno, rtx_insn *insn) |
1931 | { |
1932 | struct reg_use_data *use; |
1933 | |
1934 | use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data)); |
1935 | use->regno = regno; |
1936 | use->insn = insn; |
1937 | use->next_insn_use = INSN_REG_USE_LIST (insn);
1938 | INSN_REG_USE_LIST (insn) = use;
1939 | return use; |
1940 | } |
1941 | |
1942 | /* Allocate reg_set_data structure for REGNO and INSN. */ |
1943 | static void |
1944 | create_insn_reg_set (int regno, rtx insn) |
1945 | { |
1946 | struct reg_set_data *set; |
1947 | |
1948 | set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data)); |
1949 | set->regno = regno; |
1950 | set->insn = insn; |
1951 | set->next_insn_set = INSN_REG_SET_LIST (insn);
1952 | INSN_REG_SET_LIST (insn) = set;
1953 | } |
1954 | |
1955 | /* Set up insn register uses for INSN and dependency context DEPS. */ |
1956 | static void |
1957 | setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn) |
1958 | { |
1959 | unsigned i; |
1960 | reg_set_iterator rsi; |
1961 | struct reg_use_data *use, *use2, *next; |
1962 | struct deps_reg *reg_last; |
1963 | |
1964 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1965 | {
1966 | if (i < FIRST_PSEUDO_REGISTER
1967 | && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1968 | continue;
1969 |
1970 | if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1971 | && ! REGNO_REG_SET_P (reg_pending_sets, i)
1972 | && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1973 | /* Ignore use which is not dying. */ |
1974 | continue; |
1975 | |
1976 | use = create_insn_reg_use (i, insn); |
1977 | use->next_regno_use = use; |
1978 | reg_last = &deps->reg_last[i]; |
1979 | |
1980 | /* Create the cycle list of uses. */ |
1981 | for (rtx_insn_list *list = reg_last->uses; list; list = list->next ()) |
1982 | { |
1983 | use2 = create_insn_reg_use (i, list->insn ()); |
1984 | next = use->next_regno_use; |
1985 | use->next_regno_use = use2; |
1986 | use2->next_regno_use = next; |
1987 | } |
1988 | } |
1989 | } |
1990 | |
1991 | /* Register pressure info for the currently processed insn. */ |
1992 | static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1993 | |
1994 | /* Return TRUE if INSN has the use structure for REGNO. */ |
1995 | static bool |
1996 | insn_use_p (rtx insn, int regno) |
1997 | { |
1998 | struct reg_use_data *use; |
1999 | |
2000 | for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2001 | if (use->regno == regno) |
2002 | return true; |
2003 | return false; |
2004 | } |
2005 | |
2006 | /* Update the register pressure info after the birth of pseudo register REGNO
2007 | in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, whether
2008 | the register is clobbered or unused after the insn.  */
2009 | static void |
2010 | mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p) |
2011 | { |
2012 | int incr, new_incr; |
2013 | enum reg_class cl; |
2014 | |
2015 | gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2016 | cl = sched_regno_pressure_class[regno];
2017 | if (cl != NO_REGS)
2018 | {
2019 | incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2020 | if (clobber_p) |
2021 | { |
2022 | new_incr = reg_pressure_info[cl].clobber_increase + incr; |
2023 | reg_pressure_info[cl].clobber_increase = new_incr; |
2024 | } |
2025 | else if (unused_p) |
2026 | { |
2027 | new_incr = reg_pressure_info[cl].unused_set_increase + incr; |
2028 | reg_pressure_info[cl].unused_set_increase = new_incr; |
2029 | } |
2030 | else |
2031 | { |
2032 | new_incr = reg_pressure_info[cl].set_increase + incr; |
2033 | reg_pressure_info[cl].set_increase = new_incr; |
2034 | if (! insn_use_p (insn, regno)) |
2035 | reg_pressure_info[cl].change += incr; |
2036 | create_insn_reg_set (regno, insn); |
2037 | } |
2038 | gcc_assert (new_incr < (1 << INCREASE_BITS));
2039 | } |
2040 | } |
2041 | |
2042 | /* Like mark_insn_pseudo_birth except that NREGS says how many
2043 | hard registers are involved in the birth.  */
2044 | static void |
2045 | mark_insn_hard_regno_birth (rtx insn, int regno, int nregs, |
2046 | bool clobber_p, bool unused_p) |
2047 | { |
2048 | enum reg_class cl; |
2049 | int new_incr, last = regno + nregs; |
2050 | |
2051 | while (regno < last) |
2052 | { |
2053 | gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2054 | if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2055 | { |
2056 | cl = sched_regno_pressure_class[regno]; |
2057 | if (cl != NO_REGS) |
2058 | { |
2059 | if (clobber_p) |
2060 | { |
2061 | new_incr = reg_pressure_info[cl].clobber_increase + 1; |
2062 | reg_pressure_info[cl].clobber_increase = new_incr; |
2063 | } |
2064 | else if (unused_p) |
2065 | { |
2066 | new_incr = reg_pressure_info[cl].unused_set_increase + 1; |
2067 | reg_pressure_info[cl].unused_set_increase = new_incr; |
2068 | } |
2069 | else |
2070 | { |
2071 | new_incr = reg_pressure_info[cl].set_increase + 1; |
2072 | reg_pressure_info[cl].set_increase = new_incr; |
2073 | if (! insn_use_p (insn, regno)) |
2074 | reg_pressure_info[cl].change += 1; |
2075 | create_insn_reg_set (regno, insn); |
2076 | } |
2077 | gcc_assert (new_incr < (1 << INCREASE_BITS));
2078 | } |
2079 | } |
2080 | regno++; |
2081 | } |
2082 | } |
2083 | |
2084 | /* Update the register pressure info after the birth of pseudo or hard
2085 | register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
2086 | respectively, whether the register is clobbered or unused after the
2087 | insn.  */
2088 | static void |
2089 | mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p) |
2090 | { |
2091 | int regno; |
2092 | |
2093 | if (GET_CODE (reg) == SUBREG)
2094 | reg = SUBREG_REG (reg);
2095 |
2096 | if (! REG_P (reg))
2097 | return;
2098 |
2099 | regno = REGNO (reg);
2100 | if (regno < FIRST_PSEUDO_REGISTER)
2101 | mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2102 | clobber_p, unused_p); |
2103 | else |
2104 | mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p); |
2105 | } |
2106 | |
2107 | /* Update the register pressure info after death of pseudo register |
2108 | REGNO. */ |
2109 | static void |
2110 | mark_pseudo_death (int regno) |
2111 | { |
2112 | int incr; |
2113 | enum reg_class cl; |
2114 | |
2115 | gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2116 | cl = sched_regno_pressure_class[regno];
2117 | if (cl != NO_REGS)
2118 | {
2119 | incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2120 | reg_pressure_info[cl].change -= incr; |
2121 | } |
2122 | } |
2123 | |
2124 | /* Like mark_pseudo_death except that NREGS says how many hard
2125 | registers are involved in the death.  */
2126 | static void |
2127 | mark_hard_regno_death (int regno, int nregs) |
2128 | { |
2129 | enum reg_class cl; |
2130 | int last = regno + nregs; |
2131 | |
2132 | while (regno < last) |
2133 | { |
2134 | gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2135 | if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2136 | { |
2137 | cl = sched_regno_pressure_class[regno]; |
2138 | if (cl != NO_REGS) |
2139 | reg_pressure_info[cl].change -= 1; |
2140 | } |
2141 | regno++; |
2142 | } |
2143 | } |
2144 | |
2145 | /* Update the register pressure info after death of pseudo or hard |
2146 | register REG. */ |
2147 | static void |
2148 | mark_reg_death (rtx reg) |
2149 | { |
2150 | int regno; |
2151 | |
2152 | if (GET_CODE (reg) == SUBREG)
2153 | reg = SUBREG_REG (reg);
2154 |
2155 | if (! REG_P (reg))
2156 | return;
2157 |
2158 | regno = REGNO (reg);
2159 | if (regno < FIRST_PSEUDO_REGISTER)
2160 | mark_hard_regno_death (regno, REG_NREGS (reg));
2161 | else |
2162 | mark_pseudo_death (regno); |
2163 | } |
2164 | |
2165 | /* Process SETTER of REG. DATA is an insn containing the setter. */ |
2166 | static void |
2167 | mark_insn_reg_store (rtx reg, const_rtx setter, void *data) |
2168 | { |
2169 | if (setter != NULL_RTX && GET_CODE (setter) != SET)
2170 | return; |
2171 | mark_insn_reg_birth |
2172 | ((rtx) data, reg, false, |
2173 | find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX(rtx) 0); |
2174 | } |
2175 | |
2176 | /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */ |
2177 | static void |
2178 | mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data) |
2179 | { |
2180 | if (GET_CODE (setter)((enum rtx_code) (setter)->code) == CLOBBER) |
2181 | mark_insn_reg_birth ((rtx) data, reg, true, false); |
2182 | } |
2183 | |
2184 | /* Set up reg pressure info related to INSN. */ |
2185 | void |
2186 | init_insn_reg_pressure_info (rtx_insn *insn) |
2187 | { |
2188 | int i, len; |
2189 | enum reg_class cl; |
2190 | static struct reg_pressure_data *pressure_info; |
2191 | rtx link; |
2192 | |
2193 | gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2194 |
2195 | if (! INSN_P (insn))
2196 | return; |
2197 | |
2198 | for (i = 0; i < ira_pressure_classes_num; i++)
2199 | {
2200 | cl = ira_pressure_classes[i];
2201 | reg_pressure_info[cl].clobber_increase = 0; |
2202 | reg_pressure_info[cl].set_increase = 0; |
2203 | reg_pressure_info[cl].unused_set_increase = 0; |
2204 | reg_pressure_info[cl].change = 0; |
2205 | } |
2206 | |
2207 | note_stores (insn, mark_insn_reg_clobber, insn); |
2208 | |
2209 | note_stores (insn, mark_insn_reg_store, insn); |
2210 | |
2211 | if (AUTO_INC_DEC)
2212 | for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2213 | if (REG_NOTE_KIND (link) == REG_INC)
2214 | mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2215 |
2216 | for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2217 | if (REG_NOTE_KIND (link) == REG_DEAD)
2218 | mark_reg_death (XEXP (link, 0));
2219 |
2220 | len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2221 | pressure_info
2222 | = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2223 | if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2224 | INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2225 | * sizeof (int), 1);
2226 | for (i = 0; i < ira_pressure_classes_num; i++)
2227 | {
2228 | cl = ira_pressure_classes[i];
2229 | pressure_info[i].clobber_increase |
2230 | = reg_pressure_info[cl].clobber_increase; |
2231 | pressure_info[i].set_increase = reg_pressure_info[cl].set_increase; |
2232 | pressure_info[i].unused_set_increase |
2233 | = reg_pressure_info[cl].unused_set_increase; |
2234 | pressure_info[i].change = reg_pressure_info[cl].change; |
2235 | } |
2236 | } |
2237 | |
2238 | |
2239 | |
2240 | |
2241 | /* Internal variable for sched_analyze_[12] () functions. |
2242 | If it is nonzero, this means that sched_analyze_[12] looks |
2243 | at the most toplevel SET. */ |
2244 | static bool can_start_lhs_rhs_p; |
2245 | |
2246 | /* Extend reg info for the deps context DEPS given that |
2247 | we have just generated a register numbered REGNO. */ |
2248 | static void |
2249 | extend_deps_reg_info (class deps_desc *deps, int regno) |
2250 | { |
2251 | int max_regno = regno + 1; |
2252 | |
2253 | gcc_assert (!reload_completed);
2254 | |
2255 | /* In a readonly context, it would not hurt to extend info, |
2256 | but it should not be needed. */ |
2257 | if (reload_completed && deps->readonly) |
2258 | { |
2259 | deps->max_reg = max_regno; |
2260 | return; |
2261 | } |
2262 | |
2263 | if (max_regno > deps->max_reg) |
2264 | { |
2265 | deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2266 | max_regno);
2267 | memset (&deps->reg_last[deps->max_reg], |
2268 | 0, (max_regno - deps->max_reg) |
2269 | * sizeof (struct deps_reg)); |
2270 | deps->max_reg = max_regno; |
2271 | } |
2272 | } |
2273 | |
2274 | /* Extends REG_INFO_P if needed. */ |
2275 | void |
2276 | maybe_extend_reg_info_p (void) |
2277 | { |
2278 | /* Extend REG_INFO_P, if needed. */ |
2279 | if ((unsigned int)max_regno - 1 >= reg_info_p_size) |
2280 | { |
2281 | size_t new_reg_info_p_size = max_regno + 128; |
2282 | |
2283 | gcc_assert (!reload_completed && sel_sched_p ());
2284 | |
2285 | reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p, |
2286 | new_reg_info_p_size, |
2287 | reg_info_p_size, |
2288 | sizeof (*reg_info_p)); |
2289 | reg_info_p_size = new_reg_info_p_size; |
2290 | } |
2291 | } |
2292 | |
2293 | /* Analyze a single reference to register (reg:MODE REGNO) in INSN. |
2294 | The type of the reference is specified by REF and can be SET, |
2295 | CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */ |
2296 | |
2297 | static void |
2298 | sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode, |
2299 | enum rtx_code ref, rtx_insn *insn) |
2300 | { |
2301 | /* We could emit new pseudos in renaming. Extend the reg structures. */ |
2302 | if (!reload_completed && sel_sched_p () |
2303 | && (regno >= max_reg_num () - 1 || regno >= deps->max_reg)) |
2304 | extend_deps_reg_info (deps, regno); |
2305 | |
2306 | maybe_extend_reg_info_p (); |
2307 | |
2308 | /* A hard reg in a wide mode may really be multiple registers. |
2309 | If so, mark all of them just like the first. */ |
2310 | if (regno < FIRST_PSEUDO_REGISTER76) |
2311 | { |
2312 | int i = hard_regno_nregs (regno, mode); |
2313 | if (ref == SET) |
2314 | { |
2315 | while (--i >= 0) |
2316 | note_reg_set (regno + i); |
2317 | } |
2318 | else if (ref == USE) |
2319 | { |
2320 | while (--i >= 0) |
2321 | note_reg_use (regno + i); |
2322 | } |
2323 | else |
2324 | { |
2325 | while (--i >= 0) |
2326 | note_reg_clobber (regno + i); |
2327 | } |
2328 | } |
2329 | |
2330 | /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that |
2331 | it does not reload. Ignore these as they have served their |
2332 | purpose already. */ |
2333 | else if (regno >= deps->max_reg) |
2334 | { |
2335 | enum rtx_code code = GET_CODE (PATTERN (insn));
2336 | gcc_assert (code == USE || code == CLOBBER);
2337 | } |
2338 | |
2339 | else |
2340 | { |
2341 | if (ref == SET) |
2342 | note_reg_set (regno); |
2343 | else if (ref == USE) |
2344 | note_reg_use (regno); |
2345 | else |
2346 | note_reg_clobber (regno); |
2347 | |
2348 | /* Pseudos that are REG_EQUIV to something may be replaced |
2349 | by that during reloading. We need only add dependencies for |
2350 | the address in the REG_EQUIV note. */ |
2351 | if (!reload_completed && get_reg_known_equiv_p (regno)) |
2352 | { |
2353 | rtx t = get_reg_known_value (regno); |
2354 | if (MEM_P (t))
2355 | sched_analyze_2 (deps, XEXP (t, 0), insn);
2356 | } |
2357 | |
2358 | /* Don't let it cross a call after scheduling if it doesn't |
2359 | already cross one. */ |
2360 | if (REG_N_CALLS_CROSSED (regno) == 0)
2361 | {
2362 | if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2363 | deps->sched_before_next_call |
2364 | = alloc_INSN_LIST (insn, deps->sched_before_next_call); |
2365 | else |
2366 | add_dependence_list (insn, deps->last_function_call, 1, |
2367 | REG_DEP_ANTI, false); |
2368 | } |
2369 | } |
2370 | } |
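The "hard reg in a wide mode may really be multiple registers" case handled above can be pictured with a standalone sketch (illustration only, not GCC code; the helper name and the 32-bit word size are assumptions): the register count is essentially the mode size rounded up to whole registers, which is what the hard_regno_nregs call computes for the real target.

    #include <stdio.h>

    /* Assumed model: one hard register holds one machine word.  */
    static int
    regs_needed (int mode_size_bytes, int word_size_bytes)
    {
      /* Round up: an 8-byte value needs two 4-byte registers.  */
      return (mode_size_bytes + word_size_bytes - 1) / word_size_bytes;
    }

    int main (void)
    {
      /* An 8-byte (DImode-like) value on a 32-bit target spans two registers,
         so the loop above notes a set/use/clobber for each of them.  */
      printf ("8-byte value: %d regs\n", regs_needed (8, 4));
      printf ("4-byte value: %d regs\n", regs_needed (4, 4));
      return 0;
    }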
2371 | |
2372 | /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC |
2373 | rtx, X, creating all dependencies generated by the write to the |
2374 | destination of X, and reads of everything mentioned. */ |
2375 | |
2376 | static void |
2377 | sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn) |
2378 | { |
2379 | rtx dest = XEXP (x, 0);
2380 | enum rtx_code code = GET_CODE (x);
2381 | bool cslr_p = can_start_lhs_rhs_p; |
2382 | |
2383 | can_start_lhs_rhs_p = false; |
2384 | |
2385 | gcc_assert (dest);
2386 | if (dest == 0) |
2387 | return; |
2388 | |
2389 | if (cslr_p && sched_deps_info->start_lhs) |
2390 | sched_deps_info->start_lhs (dest); |
2391 | |
2392 | if (GET_CODE (dest) == PARALLEL)
2393 | {
2394 | int i;
2395 |
2396 | for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2397 | if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2398 | sched_analyze_1 (deps,
2399 | gen_rtx_CLOBBER (VOIDmode,
2400 | XEXP (XVECEXP (dest, 0, i), 0)),
2401 | insn);
2402 | |
2403 | if (cslr_p && sched_deps_info->finish_lhs) |
2404 | sched_deps_info->finish_lhs (); |
2405 | |
2406 | if (code == SET) |
2407 | { |
2408 | can_start_lhs_rhs_p = cslr_p; |
2409 | |
2410 | sched_analyze_2 (deps, SET_SRC (x), insn);
2411 | |
2412 | can_start_lhs_rhs_p = false; |
2413 | } |
2414 | |
2415 | return; |
2416 | } |
2417 | |
2418 | while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2419 | || GET_CODE (dest) == ZERO_EXTRACT)
2420 | {
2421 | if (GET_CODE (dest) == STRICT_LOW_PART
2422 | || GET_CODE (dest) == ZERO_EXTRACT
2423 | || read_modify_subreg_p (dest)) |
2424 | { |
2425 | /* These both read and modify the result. We must handle |
2426 | them as writes to get proper dependencies for following |
2427 | instructions. We must handle them as reads to get proper |
2428 | dependencies from this to previous instructions. |
2429 | Thus we need to call sched_analyze_2. */ |
2430 | |
2431 | sched_analyze_2 (deps, XEXP (dest, 0), insn);
2432 | } |
2433 | if (GET_CODE (dest) == ZERO_EXTRACT)
2434 | { |
2435 | /* The second and third arguments are values read by this insn. */ |
2436 | sched_analyze_2 (deps, XEXP (dest, 1), insn);
2437 | sched_analyze_2 (deps, XEXP (dest, 2), insn);
2438 | } |
2439 | dest = XEXP (dest, 0);
2440 | } |
2441 | |
2442 | if (REG_P (dest))
2443 | {
2444 | int regno = REGNO (dest);
2445 | machine_mode mode = GET_MODE (dest);
2446 | |
2447 | sched_analyze_reg (deps, regno, mode, code, insn); |
2448 | |
2449 | #ifdef STACK_REGS |
2450 | /* Treat all writes to a stack register as modifying the TOS. */ |
2451 | if (regno >= FIRST_STACK_REG8 && regno <= LAST_STACK_REG15) |
2452 | { |
2453 | /* Avoid analyzing the same register twice. */ |
2454 | if (regno != FIRST_STACK_REG8) |
2455 | sched_analyze_reg (deps, FIRST_STACK_REG8, mode, code, insn); |
2456 | |
2457 | add_to_hard_reg_set (&implicit_reg_pending_uses, mode, |
2458 | FIRST_STACK_REG8); |
2459 | } |
2460 | #endif |
2461 | } |
2462 | else if (MEM_P (dest)(((enum rtx_code) (dest)->code) == MEM)) |
2463 | { |
2464 | /* Writing memory. */ |
2465 | rtx t = dest; |
2466 | |
2467 | if (sched_deps_info->use_cselib) |
2468 | { |
2469 | machine_mode address_mode = get_address_mode (dest); |
2470 | |
2471 | t = shallow_copy_rtx (dest); |
2472 | cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2473 | GET_MODE (t), insn);
2474 | XEXP (t, 0)
2475 | = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2476 | insn);
2477 | } |
2478 | t = canon_rtx (t); |
2479 | |
2480 | /* Pending lists can't get larger with a readonly context. */ |
2481 | if (!deps->readonly |
2482 | && ((deps->pending_read_list_length + deps->pending_write_list_length) |
2483 | >= param_max_pending_list_lengthglobal_options.x_param_max_pending_list_length)) |
2484 | { |
2485 | /* Flush all pending reads and writes to prevent the pending lists |
2486 | from getting any larger. Insn scheduling runs too slowly when |
2487 | these lists get long. When compiling GCC with itself, |
2488 | this flush occurs 8 times for sparc, and 10 times for m88k using |
2489 | the default value of 32. */ |
2490 | flush_pending_lists (deps, insn, false, true); |
2491 | } |
2492 | else |
2493 | { |
2494 | rtx_insn_list *pending; |
2495 | rtx_expr_list *pending_mem; |
2496 | |
2497 | pending = deps->pending_read_insns; |
2498 | pending_mem = deps->pending_read_mems; |
2499 | while (pending) |
2500 | { |
2501 | if (anti_dependence (pending_mem->element (), t) |
2502 | && ! sched_insns_conditions_mutex_p (insn, pending->insn ())) |
2503 | note_mem_dep (t, pending_mem->element (), pending->insn (), |
2504 | DEP_ANTI);
2505 | |
2506 | pending = pending->next (); |
2507 | pending_mem = pending_mem->next (); |
2508 | } |
2509 | |
2510 | pending = deps->pending_write_insns; |
2511 | pending_mem = deps->pending_write_mems; |
2512 | while (pending) |
2513 | { |
2514 | if (output_dependence (pending_mem->element (), t) |
2515 | && ! sched_insns_conditions_mutex_p (insn, pending->insn ())) |
2516 | note_mem_dep (t, pending_mem->element (), |
2517 | pending->insn (), |
2518 | DEP_OUTPUT);
2519 | |
2520 | pending = pending->next (); |
2521 | pending_mem = pending_mem-> next (); |
2522 | } |
2523 | |
2524 | add_dependence_list (insn, deps->last_pending_memory_flush, 1, |
2525 | REG_DEP_ANTI, true); |
2526 | add_dependence_list (insn, deps->pending_jump_insns, 1, |
2527 | REG_DEP_CONTROL, true); |
2528 | |
2529 | if (!deps->readonly) |
2530 | add_insn_mem_dependence (deps, false, insn, dest); |
2531 | } |
2532 | sched_analyze_2 (deps, XEXP (dest, 0)(((dest)->u.fld[0]).rt_rtx), insn); |
2533 | } |
2534 | |
2535 | if (cslr_p && sched_deps_info->finish_lhs) |
2536 | sched_deps_info->finish_lhs (); |
2537 | |
2538 | /* Analyze reads. */ |
2539 | if (GET_CODE (x)((enum rtx_code) (x)->code) == SET) |
2540 | { |
2541 | can_start_lhs_rhs_p = cslr_p; |
2542 | |
2543 | sched_analyze_2 (deps, SET_SRC (x)(((x)->u.fld[1]).rt_rtx), insn); |
2544 | |
2545 | can_start_lhs_rhs_p = false; |
2546 | } |
2547 | } |
2548 | |
2549 | /* Analyze the uses of memory and registers in rtx X in INSN. */ |
2550 | static void |
2551 | sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn) |
2552 | { |
2553 | int i; |
2554 | int j; |
2555 | enum rtx_code code; |
2556 | const char *fmt; |
2557 | bool cslr_p = can_start_lhs_rhs_p; |
2558 | |
2559 | can_start_lhs_rhs_p = false; |
2560 | |
2561 | gcc_assert (x);
2562 | if (x == 0) |
2563 | return; |
2564 | |
2565 | if (cslr_p && sched_deps_info->start_rhs) |
2566 | sched_deps_info->start_rhs (x); |
2567 | |
2568 | code = GET_CODE (x)((enum rtx_code) (x)->code); |
2569 | |
2570 | switch (code) |
2571 | { |
2572 | CASE_CONST_ANY:
2573 | case SYMBOL_REF: |
2574 | case CONST: |
2575 | case LABEL_REF: |
2576 | /* Ignore constants. */ |
2577 | if (cslr_p && sched_deps_info->finish_rhs) |
2578 | sched_deps_info->finish_rhs (); |
2579 | |
2580 | return; |
2581 | |
2582 | case REG: |
2583 | { |
2584 | int regno = REGNO (x);
2585 | machine_mode mode = GET_MODE (x);
2586 | |
2587 | sched_analyze_reg (deps, regno, mode, USE, insn); |
2588 | |
2589 | #ifdef STACK_REGS |
2590 | /* Treat all reads of a stack register as modifying the TOS. */ |
2591 | if (regno >= FIRST_STACK_REG8 && regno <= LAST_STACK_REG15) |
2592 | { |
2593 | /* Avoid analyzing the same register twice. */ |
2594 | if (regno != FIRST_STACK_REG8) |
2595 | sched_analyze_reg (deps, FIRST_STACK_REG8, mode, USE, insn); |
2596 | sched_analyze_reg (deps, FIRST_STACK_REG8, mode, SET, insn); |
2597 | } |
2598 | #endif |
2599 | |
2600 | if (cslr_p && sched_deps_info->finish_rhs) |
2601 | sched_deps_info->finish_rhs (); |
2602 | |
2603 | return; |
2604 | } |
2605 | |
2606 | case MEM: |
2607 | { |
2608 | if (DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN) && sched_deps_info->use_cselib) |
2609 | { |
2610 | machine_mode address_mode = get_address_mode (x); |
2611 | |
2612 | cselib_lookup_from_insn (XEXP (x, 0), address_mode, 1,
2613 | GET_MODE (x), insn);
2614 | } |
2615 | else if (!DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN)) |
2616 | { |
2617 | /* Reading memory. */ |
2618 | rtx_insn_list *u; |
2619 | rtx_insn_list *pending; |
2620 | rtx_expr_list *pending_mem; |
2621 | rtx t = x; |
2622 | |
2623 | if (sched_deps_info->use_cselib) |
2624 | { |
2625 | machine_mode address_mode = get_address_mode (t); |
2626 | |
2627 | t = shallow_copy_rtx (t); |
2628 | cselib_lookup_from_insn (XEXP (t, 0)(((t)->u.fld[0]).rt_rtx), address_mode, 1, |
2629 | GET_MODE (t)((machine_mode) (t)->mode), insn); |
2630 | XEXP (t, 0)(((t)->u.fld[0]).rt_rtx) |
2631 | = cselib_subst_to_values_from_insn (XEXP (t, 0)(((t)->u.fld[0]).rt_rtx), GET_MODE (t)((machine_mode) (t)->mode), |
2632 | insn); |
2633 | } |
2634 | |
2635 | t = canon_rtx (t); |
2636 | pending = deps->pending_read_insns; |
2637 | pending_mem = deps->pending_read_mems; |
2638 | while (pending) |
2639 | { |
2640 | if (read_dependence (pending_mem->element (), t) |
2641 | && ! sched_insns_conditions_mutex_p (insn, |
2642 | pending->insn ())) |
2643 | note_mem_dep (t, pending_mem->element (), |
2644 | pending->insn (), |
2645 | DEP_ANTI(((((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4) - 8) / 4))) << 1) << 1)); |
2646 | |
2647 | pending = pending->next (); |
2648 | pending_mem = pending_mem->next (); |
2649 | } |
2650 | |
2651 | pending = deps->pending_write_insns; |
2652 | pending_mem = deps->pending_write_mems; |
2653 | while (pending) |
2654 | { |
2655 | if (true_dependence (pending_mem->element (), VOIDmode((void) 0, E_VOIDmode), t) |
2656 | && ! sched_insns_conditions_mutex_p (insn, |
2657 | pending->insn ())) |
2658 | note_mem_dep (t, pending_mem->element (), |
2659 | pending->insn (), |
2660 | sched_deps_info->generate_spec_deps |
2661 | ? BEGIN_DATA(((ds_t) ((1 << (((8 * 4) - 8) / 4)) - 1)) << BEGIN_DATA_BITS_OFFSET ) | DEP_TRUE(((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4) - 8) / 4))) : DEP_TRUE(((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4) - 8) / 4)))); |
2662 | |
2663 | pending = pending->next (); |
2664 | pending_mem = pending_mem->next (); |
2665 | } |
2666 | |
2667 | for (u = deps->last_pending_memory_flush; u; u = u->next ()) |
2668 | add_dependence (insn, u->insn (), REG_DEP_ANTI); |
2669 | |
2670 | for (u = deps->pending_jump_insns; u; u = u->next ()) |
2671 | if (deps_may_trap_p (x)) |
2672 | { |
2673 | if ((sched_deps_info->generate_spec_deps) |
2674 | && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL(((ds_t) ((1 << (((8 * 4) - 8) / 4)) - 1)) << BEGIN_CONTROL_BITS_OFFSET ))) |
2675 | { |
2676 | ds_t ds = set_dep_weak (DEP_ANTI(((((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + (((8 * 4) - 8) / 4))) << 1) << 1), BEGIN_CONTROL(((ds_t) ((1 << (((8 * 4) - 8) / 4)) - 1)) << BEGIN_CONTROL_BITS_OFFSET ), |
2677 | MAX_DEP_WEAK(((1 << (((8 * 4) - 8) / 4)) - 1) - 1)); |
2678 | |
2679 | note_dep (u->insn (), ds); |
2680 | } |
2681 | else |
2682 | add_dependence (insn, u->insn (), REG_DEP_CONTROL); |
2683 | } |
2684 | } |
2685 | |
2686 | /* Always add these dependencies to pending_reads, since |
2687 | this insn may be followed by a write. */ |
2688 | if (!deps->readonly) |
2689 | { |
2690 | if ((deps->pending_read_list_length |
2691 | + deps->pending_write_list_length) |
2692 | >= param_max_pending_list_lengthglobal_options.x_param_max_pending_list_length |
2693 | && !DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN)) |
2694 | flush_pending_lists (deps, insn, true, true); |
2695 | add_insn_mem_dependence (deps, true, insn, x); |
2696 | } |
2697 | |
2698 | sched_analyze_2 (deps, XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), insn); |
2699 | |
2700 | if (cslr_p && sched_deps_info->finish_rhs) |
2701 | sched_deps_info->finish_rhs (); |
2702 | |
2703 | return; |
2704 | } |
2705 | |
2706 | /* Force pending stores to memory in case a trap handler needs them. |
2707 | Also force pending loads from memory; loads and stores can segfault |
2708 | and the signal handler won't be triggered if the trap insn was moved |
2709 | above the load or store insn. */
2710 | case TRAP_IF: |
2711 | flush_pending_lists (deps, insn, true, true); |
2712 | break; |
2713 | |
2714 | case PREFETCH: |
2715 | if (PREFETCH_SCHEDULE_BARRIER_P (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum rtx_code) (_rtx)->code) != PREFETCH) rtl_check_failed_flag ("PREFETCH_SCHEDULE_BARRIER_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 2715, __FUNCTION__); _rtx; })->volatil)) |
2716 | reg_pending_barrier = TRUE_BARRIER; |
2717 | /* A prefetch insn contains addresses only. So if the prefetch
2718 | address has no registers, there will be no dependencies on
2719 | the prefetch insn. This is wrong from the correctness point of
2720 | view, as such a prefetch can be moved below a jump insn, which
2721 | usually generates a MOVE_BARRIER preventing insns containing
2722 | registers or memories from being moved through the barrier. It
2723 | is also wrong from the performance point of view, as a prefetch
2724 | without dependencies will have a tendency to be issued later
2725 | instead of earlier. It is hard to generate accurate
2726 | dependencies for prefetch insns, as a prefetch has only the
2727 | start address, but it is better to have something than
2728 | nothing. */
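     | /* Illustrative sketch (added for exposition): for a prefetch such as
     | (prefetch (reg:DI R) (const_int 0) (const_int 3))
     | the code below builds a MEM in Pmode for the prefetched address and
     | records it as a pending read, so the prefetch takes part in the usual
     | memory dependence checks. */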
2729 | if (!deps->readonly) |
2730 | { |
2731 | rtx x = gen_rtx_MEM (Pmode(global_options.x_ix86_pmode == PMODE_DI ? (scalar_int_mode ( (scalar_int_mode::from_int) E_DImode)) : (scalar_int_mode ((scalar_int_mode ::from_int) E_SImode))), XEXP (PATTERN (insn), 0)(((PATTERN (insn))->u.fld[0]).rt_rtx)); |
2732 | if (sched_deps_info->use_cselib) |
2733 | cselib_lookup_from_insn (x, Pmode(global_options.x_ix86_pmode == PMODE_DI ? (scalar_int_mode ( (scalar_int_mode::from_int) E_DImode)) : (scalar_int_mode ((scalar_int_mode ::from_int) E_SImode))), true, VOIDmode((void) 0, E_VOIDmode), insn); |
2734 | add_insn_mem_dependence (deps, true, insn, x); |
2735 | } |
2736 | break; |
2737 | |
2738 | case UNSPEC_VOLATILE: |
2739 | flush_pending_lists (deps, insn, true, true); |
2740 | /* FALLTHRU */ |
2741 | |
2742 | case ASM_OPERANDS: |
2743 | case ASM_INPUT: |
2744 | { |
2745 | /* Traditional and volatile asm instructions must be considered to use |
2746 | and clobber all hard registers, all pseudo-registers and all of |
2747 | memory. So must TRAP_IF and UNSPEC_VOLATILE operations. |
2748 | |
2749 | Consider for instance a volatile asm that changes the fpu rounding |
2750 | mode. An insn should not be moved across this even if it only uses |
2751 | pseudo-regs because it might give an incorrectly rounded result. */ |
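     | /* Illustrative instance of the rounding-mode example above:
     | asm volatile ("fldcw %0" : : "m" (cw));
     | on x86 reloads the FPU control word. No hard register appears in the
     | asm's operands, yet every later FP operation is affected, hence the
     | TRUE_BARRIER set just below. */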
2752 | if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum rtx_code) (_rtx)->code) != MEM && ((enum rtx_code ) (_rtx)->code) != ASM_OPERANDS && ((enum rtx_code ) (_rtx)->code) != ASM_INPUT) rtl_check_failed_flag ("MEM_VOLATILE_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 2752, __FUNCTION__); _rtx; })->volatil)) |
2753 | && !DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN)) |
2754 | reg_pending_barrier = TRUE_BARRIER; |
2755 | |
2756 | /* For all ASM_OPERANDS, we must traverse the vector of input operands. |
2757 | We cannot just fall through here since then we would be confused |
2758 | by the ASM_INPUT rtxes inside ASM_OPERANDS, which, unlike their
2759 | normal usage, do not indicate traditional asms. */
2760 | |
2761 | if (code == ASM_OPERANDS) |
2762 | { |
2763 | for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x)(((((x)->u.fld[3]).rt_rtvec))->num_elem); j++) |
2764 | sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j)(((((x)->u.fld[3]).rt_rtvec))->elem[j]), insn); |
2765 | |
2766 | if (cslr_p && sched_deps_info->finish_rhs) |
2767 | sched_deps_info->finish_rhs (); |
2768 | |
2769 | return; |
2770 | } |
2771 | break; |
2772 | } |
2773 | |
2774 | case PRE_DEC: |
2775 | case POST_DEC: |
2776 | case PRE_INC: |
2777 | case POST_INC: |
2778 | /* These both read and modify the result. We must handle them as writes |
2779 | to get proper dependencies for following instructions. We must handle |
2780 | them as reads to get proper dependencies from this to previous |
2781 | instructions. Thus we need to pass them to both sched_analyze_1 |
2782 | and sched_analyze_2. We must call sched_analyze_2 first in order |
2783 | to get the proper antecedent for the read. */ |
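     | /* Illustrative example: a store through an auto-increment address,
     | (set (mem:SI (post_inc:DI (reg:DI R))) (reg:SI V)),
     | both reads R (to form the address) and writes R (the increment), so it
     | needs backward dependencies as a reader of R and must be seen as a
     | setter of R by the insns that follow. */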
2784 | sched_analyze_2 (deps, XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), insn); |
2785 | sched_analyze_1 (deps, x, insn); |
2786 | |
2787 | if (cslr_p && sched_deps_info->finish_rhs) |
2788 | sched_deps_info->finish_rhs (); |
2789 | |
2790 | return; |
2791 | |
2792 | case POST_MODIFY: |
2793 | case PRE_MODIFY: |
2794 | /* op0 = op0 + op1 */ |
2795 | sched_analyze_2 (deps, XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), insn); |
2796 | sched_analyze_2 (deps, XEXP (x, 1)(((x)->u.fld[1]).rt_rtx), insn); |
2797 | sched_analyze_1 (deps, x, insn); |
2798 | |
2799 | if (cslr_p && sched_deps_info->finish_rhs) |
2800 | sched_deps_info->finish_rhs (); |
2801 | |
2802 | return; |
2803 | |
2804 | default: |
2805 | break; |
2806 | } |
2807 | |
2808 | /* Other cases: walk the insn. */ |
2809 | fmt = GET_RTX_FORMAT (code)(rtx_format[(int) (code)]); |
2810 | for (i = GET_RTX_LENGTH (code)(rtx_length[(int) (code)]) - 1; i >= 0; i--) |
2811 | { |
2812 | if (fmt[i] == 'e') |
2813 | sched_analyze_2 (deps, XEXP (x, i)(((x)->u.fld[i]).rt_rtx), insn); |
2814 | else if (fmt[i] == 'E') |
2815 | for (j = 0; j < XVECLEN (x, i)(((((x)->u.fld[i]).rt_rtvec))->num_elem); j++) |
2816 | sched_analyze_2 (deps, XVECEXP (x, i, j)(((((x)->u.fld[i]).rt_rtvec))->elem[j]), insn); |
2817 | } |
2818 | |
2819 | if (cslr_p && sched_deps_info->finish_rhs) |
2820 | sched_deps_info->finish_rhs (); |
2821 | } |
2822 | |
2823 | /* Try to group two fusible insns together to prevent the scheduler
2824 | from scheduling them apart. */ |
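     | /* Illustrative example: the typical fusible pair is a compare
     | immediately followed by the conditional jump that uses its result,
     | e.g. on x86
     | cmpl %esi, %edi
     | je .L3
     | Marking the jump with SCHED_GROUP_P keeps the two adjacent so the
     | hardware can fuse them into a single macro-op. */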
2825 | |
2826 | static void |
2827 | sched_macro_fuse_insns (rtx_insn *insn) |
2828 | { |
2829 | rtx_insn *prev; |
2830 | /* No target hook would return true with a debug insn as either
2831 | hook operand, and for very large sequences of only debug insns,
2832 | calling sched_macro_fuse_insns on each of them gives quadratic
2833 | compile time complexity. */
2834 | if (DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN)) |
2835 | return; |
2836 | prev = prev_nonnote_nondebug_insn (insn); |
2837 | if (!prev) |
2838 | return; |
2839 | |
2840 | if (any_condjump_p (insn)) |
2841 | { |
2842 | unsigned int condreg1, condreg2; |
2843 | rtx cc_reg_1; |
2844 | if (targetm.fixed_condition_code_regs (&condreg1, &condreg2)) |
2845 | { |
2846 | cc_reg_1 = gen_rtx_REG (CCmode((void) 0, E_CCmode), condreg1); |
2847 | if (reg_referenced_p (cc_reg_1, PATTERN (insn)) |
2848 | && modified_in_p (cc_reg_1, prev)) |
2849 | { |
2850 | if (targetm.sched.macro_fusion_pair_p (prev, insn)) |
2851 | SCHED_GROUP_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != DEBUG_INSN && ( (enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code ) (_rtx)->code) != JUMP_INSN && ((enum rtx_code) ( _rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SCHED_GROUP_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 2851, __FUNCTION__); _rtx; })->in_struct) = 1; |
2852 | return; |
2853 | } |
2854 | } |
2855 | } |
2856 | |
2857 | if (single_set (insn) && single_set (prev)) |
2858 | { |
2859 | if (targetm.sched.macro_fusion_pair_p (prev, insn)) |
2860 | SCHED_GROUP_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != DEBUG_INSN && ( (enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code ) (_rtx)->code) != JUMP_INSN && ((enum rtx_code) ( _rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SCHED_GROUP_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 2860, __FUNCTION__); _rtx; })->in_struct) = 1; |
2861 | } |
2862 | } |
2863 | |
2864 | /* Get the implicit reg pending clobbers for INSN and save them in TEMP. */ |
2865 | void |
2866 | get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn) |
2867 | { |
2868 | extract_insn (insn); |
2869 | preprocess_constraints (insn); |
2870 | alternative_mask preferred = get_preferred_alternatives (insn); |
2871 | ira_implicitly_set_insn_hard_regs (temp, preferred); |
2872 | *temp &= ~ira_no_alloc_regs(this_target_ira->x_ira_no_alloc_regs); |
2873 | } |
2874 | |
2875 | /* Analyze an INSN with pattern X to find all dependencies. */ |
2876 | static void |
2877 | sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn) |
2878 | { |
2879 | RTX_CODEenum rtx_code code = GET_CODE (x)((enum rtx_code) (x)->code); |
2880 | rtx link; |
2881 | unsigned i; |
2882 | reg_set_iterator rsi; |
2883 | |
2884 | if (! reload_completed) |
2885 | { |
2886 | HARD_REG_SET temp; |
2887 | get_implicit_reg_pending_clobbers (&temp, insn); |
2888 | implicit_reg_pending_clobbers |= temp; |
2889 | } |
2890 | |
2891 | can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) |
2892 | && code == SET); |
2893 | |
2894 | /* Group compare and branch insns for macro-fusion. */ |
2895 | if (!deps->readonly |
2896 | && targetm.sched.macro_fusion_p |
2897 | && targetm.sched.macro_fusion_p ()) |
2898 | sched_macro_fuse_insns (insn); |
2899 | |
2900 | if (may_trap_p (x)) |
2901 | /* Avoid moving trapping instructions across function calls that might |
2902 | not always return. */ |
2903 | add_dependence_list (insn, deps->last_function_call_may_noreturn, |
2904 | 1, REG_DEP_ANTI, true); |
2905 | |
2906 | /* We must avoid creating a situation in which two successors of the |
2907 | current block have different unwind info after scheduling. If at any |
2908 | point the two paths re-join this leads to incorrect unwind info. */ |
2909 | /* ??? There are certain situations involving a forced frame pointer in |
2910 | which, with extra effort, we could fix up the unwind info at a later |
2911 | CFG join. However, it seems better to notice these cases earlier |
2912 | during prologue generation and avoid marking the frame pointer setup |
2913 | as frame-related at all. */ |
2914 | if (RTX_FRAME_RELATED_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != DEBUG_INSN && ( (enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code ) (_rtx)->code) != CALL_INSN && ((enum rtx_code) ( _rtx)->code) != JUMP_INSN && ((enum rtx_code) (_rtx )->code) != BARRIER && ((enum rtx_code) (_rtx)-> code) != SET) rtl_check_failed_flag ("RTX_FRAME_RELATED_P",_rtx , "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 2914, __FUNCTION__); _rtx; })->frame_related)) |
2915 | { |
2916 | /* Make sure prologue insn is scheduled before next jump. */ |
2917 | deps->sched_before_next_jump |
2918 | = alloc_INSN_LIST (insn, deps->sched_before_next_jump); |
2919 | |
2920 | /* Make sure epilogue insn is scheduled after preceding jumps. */ |
2921 | add_dependence_list (insn, deps->last_pending_memory_flush, 1, |
2922 | REG_DEP_ANTI, true); |
2923 | add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI, |
2924 | true); |
2925 | } |
2926 | |
2927 | if (code == COND_EXEC) |
2928 | { |
2929 | sched_analyze_2 (deps, COND_EXEC_TEST (x)(((x)->u.fld[0]).rt_rtx), insn); |
2930 | |
2931 | /* ??? Should be recording conditions so we reduce the number of |
2932 | false dependencies. */ |
2933 | x = COND_EXEC_CODE (x)(((x)->u.fld[1]).rt_rtx); |
2934 | code = GET_CODE (x)((enum rtx_code) (x)->code); |
2935 | } |
2936 | if (code == SET || code == CLOBBER) |
2937 | { |
2938 | sched_analyze_1 (deps, x, insn); |
2939 | |
2940 | /* Bare clobber insns are used for letting life analysis, reg-stack |
2941 | and others know that a value is dead. Depend on the last call |
2942 | instruction so that reg-stack won't get confused. */ |
2943 | if (code == CLOBBER) |
2944 | add_dependence_list (insn, deps->last_function_call, 1, |
2945 | REG_DEP_OUTPUT, true); |
2946 | } |
2947 | else if (code == PARALLEL) |
2948 | { |
2949 | for (i = XVECLEN (x, 0)(((((x)->u.fld[0]).rt_rtvec))->num_elem); i--;) |
2950 | { |
2951 | rtx sub = XVECEXP (x, 0, i)(((((x)->u.fld[0]).rt_rtvec))->elem[i]); |
2952 | code = GET_CODE (sub)((enum rtx_code) (sub)->code); |
2953 | |
2954 | if (code == COND_EXEC) |
2955 | { |
2956 | sched_analyze_2 (deps, COND_EXEC_TEST (sub)(((sub)->u.fld[0]).rt_rtx), insn); |
2957 | sub = COND_EXEC_CODE (sub)(((sub)->u.fld[1]).rt_rtx); |
2958 | code = GET_CODE (sub)((enum rtx_code) (sub)->code); |
Value stored to 'code' is never read | |
2959 | } |
2960 | else if (code == SET || code == CLOBBER) |
2961 | sched_analyze_1 (deps, sub, insn); |
2962 | else |
2963 | sched_analyze_2 (deps, sub, insn); |
2964 | } |
2965 | } |
2966 | else |
2967 | sched_analyze_2 (deps, x, insn); |
2968 | |
2969 | /* Mark registers CLOBBERED or used by the called function. */
2970 | if (CALL_P (insn)(((enum rtx_code) (insn)->code) == CALL_INSN)) |
2971 | { |
2972 | for (link = CALL_INSN_FUNCTION_USAGE (insn)(((insn)->u.fld[7]).rt_rtx); link; link = XEXP (link, 1)(((link)->u.fld[1]).rt_rtx)) |
2973 | { |
2974 | if (GET_CODE (XEXP (link, 0))((enum rtx_code) ((((link)->u.fld[0]).rt_rtx))->code) == CLOBBER) |
2975 | sched_analyze_1 (deps, XEXP (link, 0)(((link)->u.fld[0]).rt_rtx), insn); |
2976 | else if (GET_CODE (XEXP (link, 0))((enum rtx_code) ((((link)->u.fld[0]).rt_rtx))->code) != SET) |
2977 | sched_analyze_2 (deps, XEXP (link, 0)(((link)->u.fld[0]).rt_rtx), insn); |
2978 | } |
2979 | /* Don't schedule anything after a tail call; a tail call needs
2980 | to use at least all call-saved registers. */
2981 | if (SIBLING_CALL_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SIBLING_CALL_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 2981, __FUNCTION__); _rtx; })->jump)) |
2982 | reg_pending_barrier = TRUE_BARRIER; |
2983 | else if (find_reg_note (insn, REG_SETJMP, NULLnullptr)) |
2984 | reg_pending_barrier = MOVE_BARRIER; |
2985 | } |
2986 | |
2987 | if (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN)) |
2988 | { |
2989 | rtx_insn *next = next_nonnote_nondebug_insn (insn); |
2990 | /* ??? For tablejumps, the barrier may appear not immediately after |
2991 | the jump, but after a label and a jump_table_data insn. */ |
2992 | if (next && LABEL_P (next)(((enum rtx_code) (next)->code) == CODE_LABEL) && NEXT_INSN (next) |
2993 | && JUMP_TABLE_DATA_P (NEXT_INSN (next))(((enum rtx_code) (NEXT_INSN (next))->code) == JUMP_TABLE_DATA )) |
2994 | next = NEXT_INSN (NEXT_INSN (next)); |
2995 | if (next && BARRIER_P (next)(((enum rtx_code) (next)->code) == BARRIER)) |
2996 | reg_pending_barrier = MOVE_BARRIER; |
2997 | else |
2998 | { |
2999 | rtx_insn_list *pending; |
3000 | rtx_expr_list *pending_mem; |
3001 | |
3002 | if (sched_deps_info->compute_jump_reg_dependencies) |
3003 | { |
3004 | (*sched_deps_info->compute_jump_reg_dependencies) |
3005 | (insn, reg_pending_control_uses); |
3006 | |
3007 | /* Make latency of jump equal to 0 by using anti-dependence. */ |
3008 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (reg_pending_control_uses ), (0), &(i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (&(rsi), &(i))) |
3009 | { |
3010 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3011 | add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, |
3012 | false); |
3013 | add_dependence_list (insn, reg_last->implicit_sets, |
3014 | 0, REG_DEP_ANTI, false); |
3015 | add_dependence_list (insn, reg_last->clobbers, 0, |
3016 | REG_DEP_ANTI, false); |
3017 | } |
3018 | } |
3019 | |
3020 | /* All memory writes and volatile reads must happen before the |
3021 | jump. Non-volatile reads must happen before the jump iff |
3022 | the result is needed by the register use mask computed above. */
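     | /* Illustrative consequence: if a store were allowed to drift below the
     | conditional jump, it would end up on the fall-through path only and no
     | longer execute when the branch is taken, which is why the output
     | dependence below pins every pending write above the jump. */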
3023 | |
3024 | pending = deps->pending_write_insns; |
3025 | pending_mem = deps->pending_write_mems; |
3026 | while (pending) |
3027 | { |
3028 | if (! sched_insns_conditions_mutex_p (insn, pending->insn ())) |
3029 | add_dependence (insn, pending->insn (), |
3030 | REG_DEP_OUTPUT); |
3031 | pending = pending->next (); |
3032 | pending_mem = pending_mem->next (); |
3033 | } |
3034 | |
3035 | pending = deps->pending_read_insns; |
3036 | pending_mem = deps->pending_read_mems; |
3037 | while (pending) |
3038 | { |
3039 | if (MEM_VOLATILE_P (pending_mem->element ())(__extension__ ({ __typeof ((pending_mem->element ())) const _rtx = ((pending_mem->element ())); if (((enum rtx_code) ( _rtx)->code) != MEM && ((enum rtx_code) (_rtx)-> code) != ASM_OPERANDS && ((enum rtx_code) (_rtx)-> code) != ASM_INPUT) rtl_check_failed_flag ("MEM_VOLATILE_P", _rtx , "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3039, __FUNCTION__); _rtx; })->volatil) |
3040 | && ! sched_insns_conditions_mutex_p (insn, pending->insn ())) |
3041 | add_dependence (insn, pending->insn (), |
3042 | REG_DEP_OUTPUT); |
3043 | pending = pending->next (); |
3044 | pending_mem = pending_mem->next (); |
3045 | } |
3046 | |
3047 | add_dependence_list (insn, deps->last_pending_memory_flush, 1, |
3048 | REG_DEP_ANTI, true); |
3049 | add_dependence_list (insn, deps->pending_jump_insns, 1, |
3050 | REG_DEP_ANTI, true); |
3051 | } |
3052 | } |
3053 | |
3054 | /* If this instruction can throw an exception, then moving it changes |
3055 | where block boundaries fall. This is mighty confusing elsewhere. |
3056 | Therefore, prevent such an instruction from being moved. Same for |
3057 | non-jump instructions that define block boundaries. |
3058 | ??? Unclear whether this is still necessary in EBB mode. If not, |
3059 | add_branch_dependences should be adjusted for RGN mode instead. */ |
3060 | if (((CALL_P (insn)(((enum rtx_code) (insn)->code) == CALL_INSN) || JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN)) && can_throw_internal (insn)) |
3061 | || (NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) && control_flow_insn_p (insn))) |
3062 | reg_pending_barrier = MOVE_BARRIER; |
3063 | |
3064 | if (sched_pressure != SCHED_PRESSURE_NONE) |
3065 | { |
3066 | setup_insn_reg_uses (deps, insn); |
3067 | init_insn_reg_pressure_info (insn); |
3068 | } |
3069 | |
3070 | /* Add register dependencies for insn. */ |
3071 | if (DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN)) |
3072 | { |
3073 | rtx_insn *prev = deps->last_debug_insn; |
3074 | rtx_insn_list *u; |
3075 | |
3076 | if (!deps->readonly) |
3077 | deps->last_debug_insn = insn; |
3078 | |
3079 | if (prev) |
3080 | add_dependence (insn, prev, REG_DEP_ANTI); |
3081 | |
3082 | add_dependence_list (insn, deps->last_function_call, 1, |
3083 | REG_DEP_ANTI, false); |
3084 | |
3085 | if (!sel_sched_p ()) |
3086 | for (u = deps->last_pending_memory_flush; u; u = u->next ()) |
3087 | add_dependence (insn, u->insn (), REG_DEP_ANTI); |
3088 | |
3089 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (reg_pending_uses), (0), & (i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (& (rsi), &(i))) |
3090 | { |
3091 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3092 | add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false); |
3093 | /* There's no point in making REG_DEP_CONTROL dependencies for |
3094 | debug insns. */ |
3095 | add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI, |
3096 | false); |
3097 | |
3098 | if (!deps->readonly) |
3099 | reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); |
3100 | } |
3101 | CLEAR_REG_SET (reg_pending_uses)bitmap_clear (reg_pending_uses); |
3102 | |
3103 | /* Quite often, a debug insn will refer to stuff in the |
3104 | previous instruction, but the reason we want this |
3105 | dependency here is to make sure the scheduler doesn't |
3106 | gratuitously move a debug insn ahead. This could dirty |
3107 | DF flags and cause additional analysis that wouldn't have |
3108 | occurred in compilation without debug insns, and such |
3109 | additional analysis can modify the generated code. */ |
3110 | prev = PREV_INSN (insn); |
3111 | |
3112 | if (prev && NONDEBUG_INSN_P (prev)((((enum rtx_code) (prev)->code) == INSN) || (((enum rtx_code ) (prev)->code) == JUMP_INSN) || (((enum rtx_code) (prev)-> code) == CALL_INSN))) |
3113 | add_dependence (insn, prev, REG_DEP_ANTI); |
3114 | } |
3115 | else |
3116 | { |
3117 | regset_head set_or_clobbered; |
3118 | |
3119 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (reg_pending_uses), (0), & (i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (& (rsi), &(i))) |
3120 | { |
3121 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3122 | add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false); |
3123 | add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI, |
3124 | false); |
3125 | add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE, |
3126 | false); |
3127 | |
3128 | if (!deps->readonly) |
3129 | { |
3130 | reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); |
3131 | reg_last->uses_length++; |
3132 | } |
3133 | } |
3134 | |
3135 | for (i = 0; i < FIRST_PSEUDO_REGISTER76; i++) |
3136 | if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)) |
3137 | { |
3138 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3139 | add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false); |
3140 | add_dependence_list (insn, reg_last->implicit_sets, 0, |
3141 | REG_DEP_ANTI, false); |
3142 | add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE, |
3143 | false); |
3144 | |
3145 | if (!deps->readonly) |
3146 | { |
3147 | reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); |
3148 | reg_last->uses_length++; |
3149 | } |
3150 | } |
3151 | |
3152 | if (targetm.sched.exposed_pipeline) |
3153 | { |
3154 | INIT_REG_SET (&set_or_clobbered)bitmap_initialize (&set_or_clobbered, ®_obstack); |
3155 | bitmap_ior (&set_or_clobbered, reg_pending_clobbers, |
3156 | reg_pending_sets); |
3157 | EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (&set_or_clobbered), ( 0), &(i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (&(rsi), &(i))) |
3158 | { |
3159 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3160 | rtx list; |
3161 | for (list = reg_last->uses; list; list = XEXP (list, 1)(((list)->u.fld[1]).rt_rtx)) |
3162 | { |
3163 | rtx other = XEXP (list, 0)(((list)->u.fld[0]).rt_rtx); |
3164 | if (INSN_CACHED_COND (other)((&h_d_i_d[(sched_luids[INSN_UID (other)])])->cond) != const_true_rtx |
3165 | && refers_to_regno_p (i, INSN_CACHED_COND (other)((&h_d_i_d[(sched_luids[INSN_UID (other)])])->cond))) |
3166 | INSN_CACHED_COND (other)((&h_d_i_d[(sched_luids[INSN_UID (other)])])->cond) = const_true_rtx; |
3167 | } |
3168 | } |
3169 | } |
3170 | |
3171 | /* If the current insn is conditional, we can't free any |
3172 | of the lists. */ |
3173 | if (sched_has_condition_p (insn)) |
3174 | { |
3175 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (reg_pending_clobbers), ( 0), &(i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (&(rsi), &(i))) |
3176 | { |
3177 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3178 | add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT, |
3179 | false); |
3180 | add_dependence_list (insn, reg_last->implicit_sets, 0, |
3181 | REG_DEP_ANTI, false); |
3182 | add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, |
3183 | false); |
3184 | add_dependence_list (insn, reg_last->control_uses, 0, |
3185 | REG_DEP_CONTROL, false); |
3186 | |
3187 | if (!deps->readonly) |
3188 | { |
3189 | reg_last->clobbers |
3190 | = alloc_INSN_LIST (insn, reg_last->clobbers); |
3191 | reg_last->clobbers_length++; |
3192 | } |
3193 | } |
3194 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (reg_pending_sets), (0), & (i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (& (rsi), &(i))) |
3195 | { |
3196 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3197 | add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT, |
3198 | false); |
3199 | add_dependence_list (insn, reg_last->implicit_sets, 0, |
3200 | REG_DEP_ANTI, false); |
3201 | add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT, |
3202 | false); |
3203 | add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, |
3204 | false); |
3205 | add_dependence_list (insn, reg_last->control_uses, 0, |
3206 | REG_DEP_CONTROL, false); |
3207 | |
3208 | if (!deps->readonly) |
3209 | reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); |
3210 | } |
3211 | } |
3212 | else |
3213 | { |
3214 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (reg_pending_clobbers), ( 0), &(i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (&(rsi), &(i))) |
3215 | { |
3216 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3217 | if (reg_last->uses_length >= param_max_pending_list_lengthglobal_options.x_param_max_pending_list_length |
3218 | || reg_last->clobbers_length >= param_max_pending_list_lengthglobal_options.x_param_max_pending_list_length) |
3219 | { |
3220 | add_dependence_list_and_free (deps, insn, ®_last->sets, 0, |
3221 | REG_DEP_OUTPUT, false); |
3222 | add_dependence_list_and_free (deps, insn, |
3223 | ®_last->implicit_sets, 0, |
3224 | REG_DEP_ANTI, false); |
3225 | add_dependence_list_and_free (deps, insn, ®_last->uses, 0, |
3226 | REG_DEP_ANTI, false); |
3227 | add_dependence_list_and_free (deps, insn, |
3228 | ®_last->control_uses, 0, |
3229 | REG_DEP_ANTI, false); |
3230 | add_dependence_list_and_free (deps, insn, |
3231 | ®_last->clobbers, 0, |
3232 | REG_DEP_OUTPUT, false); |
3233 | |
3234 | if (!deps->readonly) |
3235 | { |
3236 | reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); |
3237 | reg_last->clobbers_length = 0; |
3238 | reg_last->uses_length = 0; |
3239 | } |
3240 | } |
3241 | else |
3242 | { |
3243 | add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT, |
3244 | false); |
3245 | add_dependence_list (insn, reg_last->implicit_sets, 0, |
3246 | REG_DEP_ANTI, false); |
3247 | add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, |
3248 | false); |
3249 | add_dependence_list (insn, reg_last->control_uses, 0, |
3250 | REG_DEP_CONTROL, false); |
3251 | } |
3252 | |
3253 | if (!deps->readonly) |
3254 | { |
3255 | reg_last->clobbers_length++; |
3256 | reg_last->clobbers |
3257 | = alloc_INSN_LIST (insn, reg_last->clobbers); |
3258 | } |
3259 | } |
3260 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (reg_pending_sets), (0), & (i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (& (rsi), &(i))) |
3261 | { |
3262 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3263 | |
3264 | add_dependence_list_and_free (deps, insn, ®_last->sets, 0, |
3265 | REG_DEP_OUTPUT, false); |
3266 | add_dependence_list_and_free (deps, insn, |
3267 | ®_last->implicit_sets, |
3268 | 0, REG_DEP_ANTI, false); |
3269 | add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0, |
3270 | REG_DEP_OUTPUT, false); |
3271 | add_dependence_list_and_free (deps, insn, ®_last->uses, 0, |
3272 | REG_DEP_ANTI, false); |
3273 | add_dependence_list (insn, reg_last->control_uses, 0, |
3274 | REG_DEP_CONTROL, false); |
3275 | |
3276 | if (!deps->readonly) |
3277 | { |
3278 | reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); |
3279 | reg_last->uses_length = 0; |
3280 | reg_last->clobbers_length = 0; |
3281 | } |
3282 | } |
3283 | } |
3284 | if (!deps->readonly) |
3285 | { |
3286 | EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (reg_pending_control_uses ), (0), &(i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (&(rsi), &(i))) |
3287 | { |
3288 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3289 | reg_last->control_uses |
3290 | = alloc_INSN_LIST (insn, reg_last->control_uses); |
3291 | } |
3292 | } |
3293 | } |
3294 | |
3295 | for (i = 0; i < FIRST_PSEUDO_REGISTER76; i++) |
3296 | if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i)) |
3297 | { |
3298 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3299 | add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false); |
3300 | add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false); |
3301 | add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false); |
3302 | add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI, |
3303 | false); |
3304 | |
3305 | if (!deps->readonly) |
3306 | reg_last->implicit_sets |
3307 | = alloc_INSN_LIST (insn, reg_last->implicit_sets); |
3308 | } |
3309 | |
3310 | if (!deps->readonly) |
3311 | { |
3312 | IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses)bitmap_ior_into (&deps->reg_last_in_use, reg_pending_uses ); |
3313 | IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers)bitmap_ior_into (&deps->reg_last_in_use, reg_pending_clobbers ); |
3314 | IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets)bitmap_ior_into (&deps->reg_last_in_use, reg_pending_sets ); |
3315 | IOR_REG_SET_HRS (&deps->reg_last_in_use,bitmap_ior_into (&deps->reg_last_in_use, bitmap_view< HARD_REG_SET> (implicit_reg_pending_uses | implicit_reg_pending_clobbers )) |
3316 | implicit_reg_pending_usesbitmap_ior_into (&deps->reg_last_in_use, bitmap_view< HARD_REG_SET> (implicit_reg_pending_uses | implicit_reg_pending_clobbers )) |
3317 | | implicit_reg_pending_clobbers)bitmap_ior_into (&deps->reg_last_in_use, bitmap_view< HARD_REG_SET> (implicit_reg_pending_uses | implicit_reg_pending_clobbers )); |
3318 | |
3319 | /* Set up the pending barrier found. */ |
3320 | deps->last_reg_pending_barrier = reg_pending_barrier; |
3321 | } |
3322 | |
3323 | CLEAR_REG_SET (reg_pending_uses)bitmap_clear (reg_pending_uses); |
3324 | CLEAR_REG_SET (reg_pending_clobbers)bitmap_clear (reg_pending_clobbers); |
3325 | CLEAR_REG_SET (reg_pending_sets)bitmap_clear (reg_pending_sets); |
3326 | CLEAR_REG_SET (reg_pending_control_uses)bitmap_clear (reg_pending_control_uses); |
3327 | CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers); |
3328 | CLEAR_HARD_REG_SET (implicit_reg_pending_uses); |
3329 | |
3330 | /* Add dependencies if a scheduling barrier was found. */ |
3331 | if (reg_pending_barrier) |
3332 | { |
3333 | /* In the case of a barrier, most of the added dependencies are not
3334 | real, so we use anti-dependences here. */
3335 | if (sched_has_condition_p (insn)) |
3336 | { |
3337 | EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (&deps->reg_last_in_use ), (0), &(i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (&(rsi), &(i))) |
3338 | { |
3339 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3340 | add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, |
3341 | true); |
3342 | add_dependence_list (insn, reg_last->sets, 0, |
3343 | reg_pending_barrier == TRUE_BARRIER |
3344 | ? REG_DEP_TRUE : REG_DEP_ANTI, true); |
3345 | add_dependence_list (insn, reg_last->implicit_sets, 0, |
3346 | REG_DEP_ANTI, true); |
3347 | add_dependence_list (insn, reg_last->clobbers, 0, |
3348 | reg_pending_barrier == TRUE_BARRIER |
3349 | ? REG_DEP_TRUE : REG_DEP_ANTI, true); |
3350 | } |
3351 | } |
3352 | else |
3353 | { |
3354 | EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)for (bmp_iter_set_init (&(rsi), (&deps->reg_last_in_use ), (0), &(i)); bmp_iter_set (&(rsi), &(i)); bmp_iter_next (&(rsi), &(i))) |
3355 | { |
3356 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3357 | add_dependence_list_and_free (deps, insn, ®_last->uses, 0, |
3358 | REG_DEP_ANTI, true); |
3359 | add_dependence_list_and_free (deps, insn, |
3360 | ®_last->control_uses, 0, |
3361 | REG_DEP_CONTROL, true); |
3362 | add_dependence_list_and_free (deps, insn, ®_last->sets, 0, |
3363 | reg_pending_barrier == TRUE_BARRIER |
3364 | ? REG_DEP_TRUE : REG_DEP_ANTI, |
3365 | true); |
3366 | add_dependence_list_and_free (deps, insn, |
3367 | ®_last->implicit_sets, 0, |
3368 | REG_DEP_ANTI, true); |
3369 | add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0, |
3370 | reg_pending_barrier == TRUE_BARRIER |
3371 | ? REG_DEP_TRUE : REG_DEP_ANTI, |
3372 | true); |
3373 | |
3374 | if (!deps->readonly) |
3375 | { |
3376 | reg_last->uses_length = 0; |
3377 | reg_last->clobbers_length = 0; |
3378 | } |
3379 | } |
3380 | } |
3381 | |
3382 | if (!deps->readonly) |
3383 | for (i = 0; i < (unsigned)deps->max_reg; i++) |
3384 | { |
3385 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3386 | reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); |
3387 | SET_REGNO_REG_SET (&deps->reg_last_in_use, i)bitmap_set_bit (&deps->reg_last_in_use, i); |
3388 | } |
3389 | |
3390 | /* Don't flush pending lists on speculative checks for |
3391 | selective scheduling. */ |
3392 | if (!sel_sched_p () || !sel_insn_is_speculation_check (insn)) |
3393 | flush_pending_lists (deps, insn, true, true); |
3394 | |
3395 | reg_pending_barrier = NOT_A_BARRIER; |
3396 | } |
3397 | |
3398 | /* If a post-call group is still open, see if it should remain so. |
3399 | This insn must be a simple move of a hard reg to a pseudo or |
3400 | vice-versa. |
3401 | |
3402 | We must avoid moving these insns for correctness on targets |
3403 | with small register classes, and for special registers like |
3404 | PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all |
3405 | hard regs for all targets. */ |
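     | /* Illustrative example: the group typically covers the copy of a
     | return value out of its hard register, e.g.
     | (call_insn ...)
     | (set (reg:SI 105) (reg:SI 0)) ;; pseudo <- return-value hard reg (numbers made up)
     | Keeping such moves glued to the call helps keep the hard register's
     | live range short. */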
3406 | |
3407 | if (deps->in_post_call_group_p) |
3408 | { |
3409 | rtx tmp, set = single_set (insn); |
3410 | int src_regno, dest_regno; |
3411 | |
3412 | if (set == NULLnullptr) |
3413 | { |
3414 | if (DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN)) |
3415 | /* We don't want to mark debug insns as part of the same |
3416 | sched group. We know they really aren't, but if we use |
3417 | debug insns to tell that a call group is over, we'll |
3418 | get different code if debug insns are not there and |
3419 | instructions that follow seem like they should be part |
3420 | of the call group. |
3421 | |
3422 | Also, if we did, chain_to_prev_insn would move the |
3423 | deps of the debug insn to the call insn, modifying |
3424 | non-debug post-dependency counts of the debug insn |
3425 | dependencies and otherwise messing with the scheduling |
3426 | order. |
3427 | |
3428 | Instead, let such debug insns be scheduled freely, but |
3429 | keep the call group open in case there are insns that |
3430 | should be part of it afterwards. Since we grant debug |
3431 | insns higher priority than even sched group insns, it |
3432 | will all turn out all right. */ |
3433 | goto debug_dont_end_call_group; |
3434 | else |
3435 | goto end_call_group; |
3436 | } |
3437 | |
3438 | tmp = SET_DEST (set)(((set)->u.fld[0]).rt_rtx); |
3439 | if (GET_CODE (tmp)((enum rtx_code) (tmp)->code) == SUBREG) |
3440 | tmp = SUBREG_REG (tmp)(((tmp)->u.fld[0]).rt_rtx); |
3441 | if (REG_P (tmp)(((enum rtx_code) (tmp)->code) == REG)) |
3442 | dest_regno = REGNO (tmp)(rhs_regno(tmp)); |
3443 | else |
3444 | goto end_call_group; |
3445 | |
3446 | tmp = SET_SRC (set)(((set)->u.fld[1]).rt_rtx); |
3447 | if (GET_CODE (tmp)((enum rtx_code) (tmp)->code) == SUBREG) |
3448 | tmp = SUBREG_REG (tmp)(((tmp)->u.fld[0]).rt_rtx); |
3449 | if ((GET_CODE (tmp)((enum rtx_code) (tmp)->code) == PLUS |
3450 | || GET_CODE (tmp)((enum rtx_code) (tmp)->code) == MINUS) |
3451 | && REG_P (XEXP (tmp, 0))(((enum rtx_code) ((((tmp)->u.fld[0]).rt_rtx))->code) == REG) |
3452 | && REGNO (XEXP (tmp, 0))(rhs_regno((((tmp)->u.fld[0]).rt_rtx))) == STACK_POINTER_REGNUM7 |
3453 | && dest_regno == STACK_POINTER_REGNUM7) |
3454 | src_regno = STACK_POINTER_REGNUM7; |
3455 | else if (REG_P (tmp)(((enum rtx_code) (tmp)->code) == REG)) |
3456 | src_regno = REGNO (tmp)(rhs_regno(tmp)); |
3457 | else |
3458 | goto end_call_group; |
3459 | |
3460 | if (src_regno < FIRST_PSEUDO_REGISTER76 |
3461 | || dest_regno < FIRST_PSEUDO_REGISTER76) |
3462 | { |
3463 | if (!deps->readonly |
3464 | && deps->in_post_call_group_p == post_call_initial) |
3465 | deps->in_post_call_group_p = post_call; |
3466 | |
3467 | if (!sel_sched_p () || sched_emulate_haifa_p) |
3468 | { |
3469 | SCHED_GROUP_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != DEBUG_INSN && ( (enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code ) (_rtx)->code) != JUMP_INSN && ((enum rtx_code) ( _rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SCHED_GROUP_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3469, __FUNCTION__); _rtx; })->in_struct) = 1; |
3470 | CANT_MOVE (insn)((&h_d_i_d[(sched_luids[INSN_UID (insn)])])->cant_move ) = 1; |
3471 | } |
3472 | } |
3473 | else |
3474 | { |
3475 | end_call_group: |
3476 | if (!deps->readonly) |
3477 | deps->in_post_call_group_p = not_post_call; |
3478 | } |
3479 | } |
3480 | |
3481 | debug_dont_end_call_group: |
3482 | if ((current_sched_info->flags & DO_SPECULATION) |
3483 | && !sched_insn_is_legitimate_for_speculation_p (insn, 0)) |
3484 | /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot |
3485 | be speculated. */ |
3486 | { |
3487 | if (sel_sched_p ()) |
3488 | sel_mark_hard_insn (insn); |
3489 | else |
3490 | { |
3491 | sd_iterator_def sd_it; |
3492 | dep_t dep; |
3493 | |
3494 | for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK(2)); |
3495 | sd_iterator_cond (&sd_it, &dep);) |
3496 | change_spec_dep_to_hard (sd_it); |
3497 | } |
3498 | } |
3499 | |
3500 | /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must |
3501 | honor the original ordering of such insns. */
3502 | if (find_reg_note (insn, REG_ARGS_SIZE, NULLnullptr)) |
3503 | { |
3504 | if (deps->last_args_size) |
3505 | add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT); |
3506 | if (!deps->readonly) |
3507 | deps->last_args_size = insn; |
3508 | } |
3509 | |
3510 | /* We must not mix prologue and epilogue insns. See PR78029. */ |
3511 | if (prologue_contains (insn)) |
3512 | { |
3513 | add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true); |
3514 | if (!deps->readonly) |
3515 | { |
3516 | if (deps->last_logue_was_epilogue) |
3517 | free_INSN_LIST_list (&deps->last_prologue); |
3518 | deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue); |
3519 | deps->last_logue_was_epilogue = false; |
3520 | } |
3521 | } |
3522 | |
3523 | if (epilogue_contains (insn)) |
3524 | { |
3525 | add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true); |
3526 | if (!deps->readonly) |
3527 | { |
3528 | if (!deps->last_logue_was_epilogue) |
3529 | free_INSN_LIST_list (&deps->last_epilogue); |
3530 | deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue); |
3531 | deps->last_logue_was_epilogue = true; |
3532 | } |
3533 | } |
3534 | } |
3535 | |
3536 | /* Return TRUE if INSN might not always return normally (e.g. call exit, |
3537 | longjmp, loop forever, ...). */ |
3538 | /* FIXME: Why can't this function just use flags_from_decl_or_type and |
3539 | test for ECF_NORETURN? */ |
3540 | static bool |
3541 | call_may_noreturn_p (rtx_insn *insn) |
3542 | { |
3543 | rtx call; |
3544 | |
3545 | /* const or pure calls that aren't looping will always return. */ |
3546 | if (RTL_CONST_OR_PURE_CALL_P (insn)((__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("RTL_CONST_CALL_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3546, __FUNCTION__); _rtx; })->unchanging) || (__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code ) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("RTL_PURE_CALL_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3546, __FUNCTION__); _rtx; })->return_val)) |
3547 | && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("CONST_OR_PURE_CALL_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3547, __FUNCTION__); _rtx; })->call)) |
3548 | return false; |
3549 | |
3550 | call = get_call_rtx_from (insn); |
3551 | if (call && GET_CODE (XEXP (XEXP (call, 0), 0))((enum rtx_code) (((((((call)->u.fld[0]).rt_rtx))->u.fld [0]).rt_rtx))->code) == SYMBOL_REF) |
3552 | { |
3553 | rtx symbol = XEXP (XEXP (call, 0), 0)((((((call)->u.fld[0]).rt_rtx))->u.fld[0]).rt_rtx); |
3554 | if (SYMBOL_REF_DECL (symbol)((__extension__ ({ __typeof ((symbol)) const _rtx = ((symbol) ); if (((enum rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag ("CONSTANT_POOL_ADDRESS_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3554, __FUNCTION__); _rtx; })->unchanging) ? nullptr : ( (((symbol))->u.fld[1]).rt_tree)) |
3555 | && TREE_CODE (SYMBOL_REF_DECL (symbol))((enum tree_code) (((__extension__ ({ __typeof ((symbol)) const _rtx = ((symbol)); if (((enum rtx_code) (_rtx)->code) != SYMBOL_REF ) rtl_check_failed_flag ("CONSTANT_POOL_ADDRESS_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3555, __FUNCTION__); _rtx; })->unchanging) ? nullptr : ( (((symbol))->u.fld[1]).rt_tree)))->base.code) == FUNCTION_DECL) |
3556 | { |
3557 | if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))((built_in_class) (tree_check ((((__extension__ ({ __typeof ( (symbol)) const _rtx = ((symbol)); if (((enum rtx_code) (_rtx )->code) != SYMBOL_REF) rtl_check_failed_flag ("CONSTANT_POOL_ADDRESS_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3557, __FUNCTION__); _rtx; })->unchanging) ? nullptr : ( (((symbol))->u.fld[1]).rt_tree))), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3557, __FUNCTION__, (FUNCTION_DECL)))->function_decl.built_in_class ) |
3558 | == BUILT_IN_NORMAL) |
3559 | switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)((__extension__ ({ __typeof ((symbol)) const _rtx = ((symbol) ); if (((enum rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag ("CONSTANT_POOL_ADDRESS_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3559, __FUNCTION__); _rtx; })->unchanging) ? nullptr : ( (((symbol))->u.fld[1]).rt_tree)))) |
3560 | { |
3561 | case BUILT_IN_BCMP: |
3562 | case BUILT_IN_BCOPY: |
3563 | case BUILT_IN_BZERO: |
3564 | case BUILT_IN_INDEX: |
3565 | case BUILT_IN_MEMCHR: |
3566 | case BUILT_IN_MEMCMP: |
3567 | case BUILT_IN_MEMCPY: |
3568 | case BUILT_IN_MEMMOVE: |
3569 | case BUILT_IN_MEMPCPY: |
3570 | case BUILT_IN_MEMSET: |
3571 | case BUILT_IN_RINDEX: |
3572 | case BUILT_IN_STPCPY: |
3573 | case BUILT_IN_STPNCPY: |
3574 | case BUILT_IN_STRCAT: |
3575 | case BUILT_IN_STRCHR: |
3576 | case BUILT_IN_STRCMP: |
3577 | case BUILT_IN_STRCPY: |
3578 | case BUILT_IN_STRCSPN: |
3579 | case BUILT_IN_STRLEN: |
3580 | case BUILT_IN_STRNCAT: |
3581 | case BUILT_IN_STRNCMP: |
3582 | case BUILT_IN_STRNCPY: |
3583 | case BUILT_IN_STRPBRK: |
3584 | case BUILT_IN_STRRCHR: |
3585 | case BUILT_IN_STRSPN: |
3586 | case BUILT_IN_STRSTR: |
3587 | /* Assume certain string/memory builtins always return. */ |
3588 | return false; |
3589 | default: |
3590 | break; |
3591 | } |
3592 | } |
3593 | } |
3594 | |
3595 | /* For all other calls assume that they might not always return. */ |
3596 | return true; |
3597 | } |
3598 | |
3599 | /* Return true if INSN should be made dependent on the previous instruction |
3600 | group, and if all INSN's dependencies should be moved to the first |
3601 | instruction of that group. */ |
3602 | |
3603 | static bool |
3604 | chain_to_prev_insn_p (rtx_insn *insn) |
3605 | { |
3606 | /* INSN forms a group with the previous instruction. */ |
3607 | if (SCHED_GROUP_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != DEBUG_INSN && ( (enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code ) (_rtx)->code) != JUMP_INSN && ((enum rtx_code) ( _rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SCHED_GROUP_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3607, __FUNCTION__); _rtx; })->in_struct)) |
3608 | return true; |
3609 | |
3610 | /* If the previous instruction clobbers a register R and this one sets |
3611 | part of R, the clobber was added specifically to help us track the |
3612 | liveness of R. There's no point scheduling the clobber and leaving |
3613 | INSN behind, especially if we move the clobber to another block. */ |
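     | /* Illustrative example of the pattern meant here:
     | (clobber (reg:DI R))
     | (set (subreg:SI (reg:DI R) 0) ...)
     | The clobber only exists to tell dataflow that the rest of R is dead,
     | so it should stay with the insn that does the partial set. */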
3614 | rtx_insn *prev = prev_nonnote_nondebug_insn (insn); |
3615 | if (prev |
3616 | && INSN_P (prev)(((((enum rtx_code) (prev)->code) == INSN) || (((enum rtx_code ) (prev)->code) == JUMP_INSN) || (((enum rtx_code) (prev)-> code) == CALL_INSN)) || (((enum rtx_code) (prev)->code) == DEBUG_INSN)) |
3617 | && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn) |
3618 | && GET_CODE (PATTERN (prev))((enum rtx_code) (PATTERN (prev))->code) == CLOBBER) |
3619 | { |
3620 | rtx x = XEXP (PATTERN (prev), 0)(((PATTERN (prev))->u.fld[0]).rt_rtx); |
3621 | if (set_of (x, insn)) |
3622 | return true; |
3623 | } |
3624 | |
3625 | return false; |
3626 | } |
3627 | |
3628 | /* Analyze INSN with DEPS as a context. */ |
3629 | void |
3630 | deps_analyze_insn (class deps_desc *deps, rtx_insn *insn) |
3631 | { |
3632 | if (sched_deps_info->start_insn) |
3633 | sched_deps_info->start_insn (insn); |
3634 | |
3635 | /* Record the condition for this insn. */ |
3636 | if (NONDEBUG_INSN_P (insn)((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code ) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)-> code) == CALL_INSN))) |
3637 | { |
3638 | rtx t; |
3639 | sched_get_condition_with_rev (insn, NULLnullptr); |
3640 | t = INSN_CACHED_COND (insn)((&h_d_i_d[(sched_luids[INSN_UID (insn)])])->cond); |
3641 | INSN_COND_DEPS (insn)((&h_d_i_d[(sched_luids[INSN_UID (insn)])])->cond_deps ) = NULLnullptr; |
3642 | if (reload_completed |
3643 | && (current_sched_info->flags & DO_PREDICATION) |
3644 | && COMPARISON_P (t)(((rtx_class[(int) (((enum rtx_code) (t)->code))]) & ( ~1)) == (RTX_COMPARE & (~1))) |
3645 | && REG_P (XEXP (t, 0))(((enum rtx_code) ((((t)->u.fld[0]).rt_rtx))->code) == REG ) |
3646 | && CONSTANT_P (XEXP (t, 1))((rtx_class[(int) (((enum rtx_code) ((((t)->u.fld[1]).rt_rtx ))->code))]) == RTX_CONST_OBJ)) |
3647 | { |
3648 | unsigned int regno; |
3649 | int nregs; |
3650 | rtx_insn_list *cond_deps = NULLnullptr; |
3651 | t = XEXP (t, 0)(((t)->u.fld[0]).rt_rtx); |
3652 | regno = REGNO (t)(rhs_regno(t)); |
3653 | nregs = REG_NREGS (t)((&(t)->u.reg)->nregs); |
3654 | while (nregs-- > 0) |
3655 | { |
3656 | struct deps_reg *reg_last = &deps->reg_last[regno + nregs]; |
3657 | cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps); |
3658 | cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps); |
3659 | cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps); |
3660 | } |
3661 | INSN_COND_DEPS (insn)((&h_d_i_d[(sched_luids[INSN_UID (insn)])])->cond_deps ) = cond_deps; |
3662 | } |
3663 | } |
3664 | |
3665 | if (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN)) |
3666 | { |
3667 | /* Make each JUMP_INSN (but not a speculative check) |
3668 | a scheduling barrier for memory references. */ |
3669 | if (!deps->readonly |
3670 | && !(sel_sched_p () |
3671 | && sel_insn_is_speculation_check (insn))) |
3672 | { |
3673 | /* Keep the list a reasonable size. */ |
3674 | if (deps->pending_flush_length++ >= param_max_pending_list_lengthglobal_options.x_param_max_pending_list_length) |
3675 | flush_pending_lists (deps, insn, true, true); |
3676 | else |
3677 | deps->pending_jump_insns |
3678 | = alloc_INSN_LIST (insn, deps->pending_jump_insns); |
3679 | } |
3680 | |
3681 | /* For each insn which shouldn't cross a jump, add a dependence. */ |
3682 | add_dependence_list_and_free (deps, insn, |
3683 | &deps->sched_before_next_jump, 1, |
3684 | REG_DEP_ANTI, true); |
3685 | |
3686 | sched_analyze_insn (deps, PATTERN (insn), insn); |
3687 | } |
3688 | else if (NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) || DEBUG_INSN_P (insn)(((enum rtx_code) (insn)->code) == DEBUG_INSN)) |
3689 | { |
3690 | sched_analyze_insn (deps, PATTERN (insn), insn); |
3691 | } |
3692 | else if (CALL_P (insn)(((enum rtx_code) (insn)->code) == CALL_INSN)) |
3693 | { |
3694 | int i; |
3695 | |
3696 | CANT_MOVE (insn)((&h_d_i_d[(sched_luids[INSN_UID (insn)])])->cant_move ) = 1; |
3697 | |
3698 | if (!reload_completed) |
3699 | { |
3700 | /* Scheduling across calls may increase register pressure by extending |
3701 | live ranges of pseudos over the call. Worse, in the presence of setjmp
3702 | it may incorrectly move up an assignment over a longjmp. */ |
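     | /* Illustrative scenario: with
     | x = 1; if (setjmp (buf)) return x; foo (); x = 2;
     | hoisting "x = 2" above the call to foo, which may longjmp back to the
     | setjmp, would make the function return 2 instead of 1. */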
3703 | reg_pending_barrier = MOVE_BARRIER; |
3704 | } |
3705 | else if (find_reg_note (insn, REG_SETJMP, NULLnullptr)) |
3706 | { |
3707 | /* This is setjmp. Assume that all registers, not just |
3708 | hard registers, may be clobbered by this call. */ |
3709 | reg_pending_barrier = MOVE_BARRIER; |
3710 | } |
3711 | else |
3712 | { |
3713 | function_abi callee_abi = insn_callee_abi (insn); |
3714 | for (i = 0; i < FIRST_PSEUDO_REGISTER76; i++) |
3715 | /* A call may read and modify global register variables. */ |
3716 | if (global_regs[i]) |
3717 | { |
3718 | SET_REGNO_REG_SET (reg_pending_sets, i)bitmap_set_bit (reg_pending_sets, i); |
3719 | SET_HARD_REG_BIT (implicit_reg_pending_uses, i); |
3720 | } |
3721 | /* Other call-clobbered hard regs may be clobbered. |
3722 | Since we only have a choice between 'might be clobbered' |
3723 | and 'definitely not clobbered', we must include all |
3724 | partly call-clobbered registers here. */ |
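     | /* Illustrative case: on some targets only the low part of a vector
     | register survives a call while its high part does not; such a register
     | is "partly call-clobbered" and must be treated as clobbered here. */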
3725 | else if (callee_abi.clobbers_at_least_part_of_reg_p (i)) |
3726 | SET_REGNO_REG_SET (reg_pending_clobbers, i)bitmap_set_bit (reg_pending_clobbers, i); |
3727 | /* We don't know what set of fixed registers might be used |
3728 | by the function, but it is certain that the stack pointer |
3729 | is among them, so be conservative. */
3730 | else if (fixed_regs(this_target_hard_regs->x_fixed_regs)[i]) |
3731 | SET_HARD_REG_BIT (implicit_reg_pending_uses, i); |
3732 | /* The frame pointer is normally not used by the function |
3733 | itself, but by the debugger. */ |
3734 | /* ??? MIPS o32 is an exception. It uses the frame pointer |
3735 | in the macro expansion of jal but does not represent this |
3736 | fact in the call_insn rtl. */ |
3737 | else if (i == FRAME_POINTER_REGNUM19 |
3738 | || (i == HARD_FRAME_POINTER_REGNUM6 |
3739 | && (! reload_completed || frame_pointer_needed((&x_rtl)->frame_pointer_needed)))) |
3740 | SET_HARD_REG_BIT (implicit_reg_pending_uses, i); |
3741 | } |
3742 | |
3743 | /* For each insn which shouldn't cross a call, add a dependence |
3744 | between that insn and this call insn. */ |
3745 | add_dependence_list_and_free (deps, insn, |
3746 | &deps->sched_before_next_call, 1, |
3747 | REG_DEP_ANTI, true); |
3748 | |
3749 | sched_analyze_insn (deps, PATTERN (insn), insn); |
3750 | |
3751 | /* If CALL would be in a sched group, then this will violate |
3752 | the convention that sched group insns have dependencies only on the
3753 | previous instruction. |
3754 | |
3755 | Of course one can say: "Hey! What about head of the sched group?" |
3756 | And I will answer: "Basic principles (one dep per insn) are always |
3757 | the same." */ |
3758 | gcc_assert (!SCHED_GROUP_P (insn))((void)(!(!(__extension__ ({ __typeof ((insn)) const _rtx = ( (insn)); if (((enum rtx_code) (_rtx)->code) != DEBUG_INSN && ((enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code ) (_rtx)->code) != JUMP_INSN && ((enum rtx_code) ( _rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SCHED_GROUP_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3758, __FUNCTION__); _rtx; })->in_struct)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3758, __FUNCTION__), 0 : 0)); |
3759 | |
3760 | /* In the absence of interprocedural alias analysis, we must flush |
3761 | all pending reads and writes, and start new dependencies starting |
3762 | from here. But only flush writes for constant calls (which may |
3763 | be passed a pointer to something we haven't written yet). */ |
3764 | flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn)((__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("RTL_CONST_CALL_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3764, __FUNCTION__); _rtx; })->unchanging) || (__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if (((enum rtx_code ) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("RTL_PURE_CALL_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/sched-deps.cc" , 3764, __FUNCTION__); _rtx; })->return_val))); |
3765 | |
3766 | if (!deps->readonly) |
3767 | { |
3768 | /* Remember the last function call for limiting lifetimes. */ |
3769 | free_INSN_LIST_list (&deps->last_function_call); |
3770 | deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX); |
3771 | |
3772 | if (call_may_noreturn_p (insn)) |
3773 | { |
3774 | /* Remember the last function call that might not always return |
3775 | normally for limiting moves of trapping insns. */ |
3776 | free_INSN_LIST_list (&deps->last_function_call_may_noreturn); |
3777 | deps->last_function_call_may_noreturn |
3778 | = alloc_INSN_LIST (insn, NULL_RTX); |
3779 | } |
3780 | |
3781 | /* Before reload, begin a post-call group, so as to keep the |
3782 | lifetimes of hard registers correct. */ |
3783 | if (! reload_completed) |
3784 | deps->in_post_call_group_p = post_call; |
3785 | } |
3786 | } |
3787 | |
3788 | if (sched_deps_info->use_cselib) |
3789 | cselib_process_insn (insn); |
3790 | |
3791 | if (sched_deps_info->finish_insn) |
3792 | sched_deps_info->finish_insn (); |
3793 | |
3794 | /* Fixup the dependencies in the sched group. */ |
3795 | if ((NONJUMP_INSN_P (insn) || JUMP_P (insn)) |
3796 | && chain_to_prev_insn_p (insn) |
3797 | && !sel_sched_p ()) |
3798 | chain_to_prev_insn (insn); |
3799 | } |
3800 | |
3801 | /* Initialize DEPS for the new block beginning with HEAD. */ |
3802 | void |
3803 | deps_start_bb (class deps_desc *deps, rtx_insn *head) |
3804 | { |
3805 | gcc_assert (!deps->readonly); |
3806 | |
3807 | /* Before reload, if the previous block ended in a call, show that |
3808 | we are inside a post-call group, so as to keep the lifetimes of |
3809 | hard registers correct. */ |
3810 | if (! reload_completed && !LABEL_P (head)) |
3811 | { |
3812 | rtx_insn *insn = prev_nonnote_nondebug_insn (head); |
3813 | |
3814 | if (insn && CALL_P (insn)) |
3815 | deps->in_post_call_group_p = post_call_initial; |
3816 | } |
3817 | } |
3818 | |
3819 | /* Analyze every insn between HEAD and TAIL inclusive, creating backward |
3820 | dependencies for each insn. */ |
3821 | void |
3822 | sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail) |
3823 | { |
3824 | rtx_insn *insn; |
3825 | |
3826 | if (sched_deps_info->use_cselib) |
3827 | cselib_init (CSELIB_RECORD_MEMORY); |
3828 | |
3829 | deps_start_bb (deps, head); |
3830 | |
3831 | for (insn = head;; insn = NEXT_INSN (insn)) |
3832 | { |
3833 | if (INSN_P (insn)) |
3834 | { |
3835 | /* And initialize deps_lists. */ |
3836 | sd_init_insn (insn); |
3837 | /* Clean up SCHED_GROUP_P which may be set by last |
3838 | scheduler pass. */ |
3839 | if (SCHED_GROUP_P (insn)) |
3840 | SCHED_GROUP_P (insn) = 0; |
3841 | } |
3842 | |
3843 | deps_analyze_insn (deps, insn); |
3844 | |
3845 | if (insn == tail) |
3846 | { |
3847 | if (sched_deps_info->use_cselib) |
3848 | cselib_finish (); |
3849 | return; |
3850 | } |
3851 | } |
3852 | } |
3853 | |
3854 | /* Helper for sched_free_deps (). |
3855 | Delete INSN's (RESOLVED_P) backward dependencies. */ |
3856 | static void |
3857 | delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p) |
3858 | { |
3859 | sd_iterator_def sd_it; |
3860 | dep_t dep; |
3861 | sd_list_types_def types; |
3862 | |
3863 | if (resolved_p) |
3864 | types = SD_LIST_RES_BACK; |
3865 | else |
3866 | types = SD_LIST_BACK; |
3867 | |
3868 | for (sd_it = sd_iterator_start (insn, types); |
3869 | sd_iterator_cond (&sd_it, &dep);) |
3870 | { |
3871 | dep_link_t link = *sd_it.linkp; |
3872 | dep_node_t node = DEP_LINK_NODE (link); |
3873 | deps_list_t back_list; |
3874 | deps_list_t forw_list; |
3875 | |
3876 | get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list); |
3877 | remove_from_deps_list (link, back_list); |
3878 | delete_dep_node (node); |
3879 | } |
3880 | } |
3881 | |
3882 | /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with |
3883 | deps_lists. */ |
3884 | void |
3885 | sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p) |
3886 | { |
3887 | rtx_insn *insn; |
3888 | rtx_insn *next_tail = NEXT_INSN (tail); |
3889 | |
3890 | /* We make two passes since some insns may be scheduled before their |
3891 | dependencies are resolved. */ |
3892 | for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) |
3893 | if (INSN_P (insn) && INSN_LUID (insn) > 0) |
3894 | { |
3895 | /* Clear forward deps and leave the dep_nodes to the |
3896 | corresponding back_deps list. */ |
3897 | if (resolved_p) |
3898 | clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn)); |
3899 | else |
3900 | clear_deps_list (INSN_FORW_DEPS (insn)); |
3901 | } |
3902 | for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) |
3903 | if (INSN_P (insn) && INSN_LUID (insn) > 0) |
3904 | { |
3905 | /* Clear resolved back deps together with its dep_nodes. */ |
3906 | delete_dep_nodes_in_back_deps (insn, resolved_p); |
3907 | |
3908 | sd_finish_insn (insn); |
3909 | } |
3910 | } |
3911 | |
3912 | /* Initialize variables for region data dependence analysis. |
3913 | When LAZY_REG_LAST is true, do not allocate reg_last array |
3914 | of class deps_desc immediately. */ |
3915 | |
3916 | void |
3917 | init_deps (class deps_desc *deps, bool lazy_reg_last) |
3918 | { |
3919 | int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ()); |
3920 | |
3921 | deps->max_reg = max_reg; |
3922 | if (lazy_reg_last) |
3923 | deps->reg_last = NULL; |
3924 | else |
3925 | deps->reg_last = XCNEWVEC (struct deps_reg, max_reg); |
3926 | INIT_REG_SET (&deps->reg_last_in_use); |
3927 | |
3928 | deps->pending_read_insns = 0; |
3929 | deps->pending_read_mems = 0; |
3930 | deps->pending_write_insns = 0; |
3931 | deps->pending_write_mems = 0; |
3932 | deps->pending_jump_insns = 0; |
3933 | deps->pending_read_list_length = 0; |
3934 | deps->pending_write_list_length = 0; |
3935 | deps->pending_flush_length = 0; |
3936 | deps->last_pending_memory_flush = 0; |
3937 | deps->last_function_call = 0; |
3938 | deps->last_function_call_may_noreturn = 0; |
3939 | deps->sched_before_next_call = 0; |
3940 | deps->sched_before_next_jump = 0; |
3941 | deps->in_post_call_group_p = not_post_call; |
3942 | deps->last_debug_insn = 0; |
3943 | deps->last_args_size = 0; |
3944 | deps->last_prologue = 0; |
3945 | deps->last_epilogue = 0; |
3946 | deps->last_logue_was_epilogue = false; |
3947 | deps->last_reg_pending_barrier = NOT_A_BARRIER; |
3948 | deps->readonly = 0; |
3949 | } |
3950 | |
3951 | /* Init only reg_last field of DEPS, which was not allocated before as |
3952 | we inited DEPS lazily. */ |
3953 | void |
3954 | init_deps_reg_last (class deps_desc *deps) |
3955 | { |
3956 | gcc_assert (deps && deps->max_reg > 0); |
3957 | gcc_assert (deps->reg_last == NULL); |
3958 | |
3959 | deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg); |
3960 | } |
3961 | |
3962 | |
3963 | /* Free insn lists found in DEPS. */ |
3964 | |
3965 | void |
3966 | free_deps (class deps_desc *deps) |
3967 | { |
3968 | unsigned i; |
3969 | reg_set_iterator rsi; |
3970 | |
3971 | /* We set max_reg to 0 when this context was already freed. */ |
3972 | if (deps->max_reg == 0) |
3973 | { |
3974 | gcc_assert (deps->reg_last == NULL); |
3975 | return; |
3976 | } |
3977 | deps->max_reg = 0; |
3978 | |
3979 | free_INSN_LIST_list (&deps->pending_read_insns); |
3980 | free_EXPR_LIST_list (&deps->pending_read_mems); |
3981 | free_INSN_LIST_list (&deps->pending_write_insns); |
3982 | free_EXPR_LIST_list (&deps->pending_write_mems); |
3983 | free_INSN_LIST_list (&deps->last_pending_memory_flush); |
3984 | |
3985 | /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions |
3986 | times. For a testcase with 42000 regs and 8000 small basic blocks, |
3987 | this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */ |
3988 | EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi) |
3989 | { |
3990 | struct deps_reg *reg_last = &deps->reg_last[i]; |
3991 | if (reg_last->uses) |
3992 | free_INSN_LIST_list (®_last->uses); |
3993 | if (reg_last->sets) |
3994 | free_INSN_LIST_list (®_last->sets); |
3995 | if (reg_last->implicit_sets) |
3996 | free_INSN_LIST_list (®_last->implicit_sets); |
3997 | if (reg_last->control_uses) |
3998 | free_INSN_LIST_list (®_last->control_uses); |
3999 | if (reg_last->clobbers) |
4000 | free_INSN_LIST_list (®_last->clobbers); |
4001 | } |
4002 | CLEAR_REG_SET (&deps->reg_last_in_use); |
4003 | |
4004 | /* As we initialize reg_last lazily, it is possible that we didn't allocate |
4005 | it at all. */ |
4006 | free (deps->reg_last); |
4007 | deps->reg_last = NULL; |
4008 | |
4009 | deps = NULL; |
4010 | } |
4011 | |
4012 | /* Remove INSN from dependence contexts DEPS. */ |
4013 | void |
4014 | remove_from_deps (class deps_desc *deps, rtx_insn *insn) |
4015 | { |
4016 | int removed; |
4017 | unsigned i; |
4018 | reg_set_iterator rsi; |
4019 | |
4020 | removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns, |
4021 | &deps->pending_read_mems); |
4022 | if (!DEBUG_INSN_P (insn)) |
4023 | deps->pending_read_list_length -= removed; |
4024 | removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns, |
4025 | &deps->pending_write_mems); |
4026 | deps->pending_write_list_length -= removed; |
4027 | |
4028 | removed = remove_from_dependence_list (insn, &deps->pending_jump_insns); |
4029 | deps->pending_flush_length -= removed; |
4030 | removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush); |
4031 | deps->pending_flush_length -= removed; |
4032 | |
4033 | unsigned to_clear = -1U; |
4034 | EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi) |
4035 | { |
4036 | if (to_clear != -1U) |
4037 | { |
4038 | CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear); |
4039 | to_clear = -1U; |
4040 | } |
4041 | struct deps_reg *reg_last = &deps->reg_last[i]; |
4042 | if (reg_last->uses) |
4043 | remove_from_dependence_list (insn, ®_last->uses); |
4044 | if (reg_last->sets) |
4045 | remove_from_dependence_list (insn, ®_last->sets); |
4046 | if (reg_last->implicit_sets) |
4047 | remove_from_dependence_list (insn, ®_last->implicit_sets); |
4048 | if (reg_last->clobbers) |
4049 | remove_from_dependence_list (insn, ®_last->clobbers); |
4050 | if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets |
4051 | && !reg_last->clobbers) |
4052 | to_clear = i; |
4053 | } |
4054 | if (to_clear != -1U) |
4055 | CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear); |
4056 | |
4057 | if (CALL_P (insn)) |
4058 | { |
4059 | remove_from_dependence_list (insn, &deps->last_function_call); |
4060 | remove_from_dependence_list (insn, |
4061 | &deps->last_function_call_may_noreturn); |
4062 | } |
4063 | remove_from_dependence_list (insn, &deps->sched_before_next_call); |
4064 | } |
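The register-set walk above may not clear the bit the iterator is currently positioned on, which is why the function parks the register number in to_clear and clears it one step later (and once more after the loop). A minimal standalone sketch of the same deferred-clear idiom, using plain C++ containers rather than GCC's register sets (names and types here are illustrative only):

#include <cstddef>
#include <vector>

/* Drop entries from a membership bitmap once they become empty, but
   postpone each clear until the walk has moved past the entry, the
   same shape as the to_clear handling above.  */
void
prune_in_use (std::vector<bool> &in_use, const std::vector<bool> &now_empty)
{
  std::size_t to_clear = (std::size_t) -1;
  for (std::size_t i = 0; i < in_use.size (); i++)
    {
      if (to_clear != (std::size_t) -1)
        {
          in_use[to_clear] = false;     /* safe: we are past this index */
          to_clear = (std::size_t) -1;
        }
      if (in_use[i] && now_empty[i])
        to_clear = i;                   /* remember; clear on the next step */
    }
  if (to_clear != (std::size_t) -1)
    in_use[to_clear] = false;
}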
4065 | |
4066 | /* Init deps data vector. */ |
4067 | static void |
4068 | init_deps_data_vector (void) |
4069 | { |
4070 | int reserve = (sched_max_luid + 1 - h_d_i_d.length ()); |
4071 | if (reserve > 0 && ! h_d_i_d.space (reserve)) |
4072 | h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2, true); |
4073 | } |
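A worked example of the growth policy above, with made-up numbers:

/* If sched_max_luid is 1000 and h_d_i_d currently holds 800 entries,
   reserve is 1000 + 1 - 800 = 201; when that much space is not already
   available, the vector is grown (cleared) to 3 * 1000 / 2 = 1500
   entries, leaving headroom for later luids without another
   reallocation.  */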
4074 | |
4075 | /* If it is profitable to use them, initialize or extend (depending on |
4076 | GLOBAL_P) dependency data. */ |
4077 | void |
4078 | sched_deps_init (bool global_p) |
4079 | { |
4080 | /* Average number of insns in the basic block. |
4081 | '+ 1' is used to make it nonzero. */ |
4082 | int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1; |
4083 | |
4084 | init_deps_data_vector (); |
4085 | |
4086 | /* We use another caching mechanism for selective scheduling, so |
4087 | we don't use this one. */ |
4088 | if (!sel_sched_p () && global_p && insns_in_block > 100 * 5) |
4089 | { |
4090 | /* ?!? We could save some memory by computing a per-region luid mapping |
4091 | which could reduce both the number of vectors in the cache and the |
4092 | size of each vector. Instead we just avoid the cache entirely unless |
4093 | the average number of instructions in a basic block is very high. See |
4094 | the comment before the declaration of true_dependency_cache for |
4095 | what we consider "very high". */ |
4096 | cache_size = 0; |
4097 | extend_dependency_caches (sched_max_luid, true); |
4098 | } |
4099 | |
4100 | if (global_p) |
4101 | { |
4102 | dl_pool = new object_allocator<_deps_list> ("deps_list"); |
4103 | /* Allocate lists for one block at a time. */ |
4104 | dn_pool = new object_allocator<_dep_node> ("dep_node"); |
4105 | /* Allocate nodes for one block at a time. */ |
4106 | } |
4107 | } |
4108 | |
4109 | |
4110 | /* Create or extend (depending on CREATE_P) dependency caches to |
4111 | size N. */ |
4112 | void |
4113 | extend_dependency_caches (int n, bool create_p) |
4114 | { |
4115 | if (create_p || true_dependency_cache) |
4116 | { |
4117 | int i, luid = cache_size + n; |
4118 | |
4119 | true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache, |
4120 | luid); |
4121 | output_dependency_cache = XRESIZEVEC (bitmap_head, |
4122 | output_dependency_cache, luid); |
4123 | anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache, |
4124 | luid); |
4125 | control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache, |
4126 | luid); |
4127 | |
4128 | if (current_sched_info->flags & DO_SPECULATION) |
4129 | spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache, |
4130 | luid); |
4131 | |
4132 | for (i = cache_size; i < luid; i++) |
4133 | { |
4134 | bitmap_initialize (&true_dependency_cache[i], 0); |
4135 | bitmap_initialize (&output_dependency_cache[i], 0); |
4136 | bitmap_initialize (&anti_dependency_cache[i], 0); |
4137 | bitmap_initialize (&control_dependency_cache[i], 0); |
4138 | |
4139 | if (current_sched_info->flags & DO_SPECULATION) |
4140 | bitmap_initialize (&spec_dependency_cache[i], 0); |
4141 | } |
4142 | cache_size = luid; |
4143 | } |
4144 | } |
4145 | |
4146 | /* Finalize dependency information for the whole function. */ |
4147 | void |
4148 | sched_deps_finish (void) |
4149 | { |
4150 | gcc_assert (deps_pools_are_empty_p ()); |
4151 | delete dn_pool; |
4152 | delete dl_pool; |
4153 | dn_pool = NULL; |
4154 | dl_pool = NULL; |
4155 | |
4156 | h_d_i_d.release (); |
4157 | cache_size = 0; |
4158 | |
4159 | if (true_dependency_cache) |
4160 | { |
4161 | int i; |
4162 | |
4163 | for (i = 0; i < cache_size; i++) |
4164 | { |
4165 | bitmap_clear (&true_dependency_cache[i]); |
4166 | bitmap_clear (&output_dependency_cache[i]); |
4167 | bitmap_clear (&anti_dependency_cache[i]); |
4168 | bitmap_clear (&control_dependency_cache[i]); |
4169 | |
4170 | if (sched_deps_info->generate_spec_deps) |
4171 | bitmap_clear (&spec_dependency_cache[i]); |
4172 | } |
4173 | free (true_dependency_cache); |
4174 | true_dependency_cache = NULL; |
4175 | free (output_dependency_cache); |
4176 | output_dependency_cache = NULL; |
4177 | free (anti_dependency_cache); |
4178 | anti_dependency_cache = NULL; |
4179 | free (control_dependency_cache); |
4180 | control_dependency_cache = NULL; |
4181 | |
4182 | if (sched_deps_info->generate_spec_deps) |
4183 | { |
4184 | free (spec_dependency_cache); |
4185 | spec_dependency_cache = NULL; |
4186 | } |
4187 | |
4188 | } |
4189 | } |
4190 | |
4191 | /* Initialize some global variables needed by the dependency analysis |
4192 | code. */ |
4193 | |
4194 | void |
4195 | init_deps_global (void) |
4196 | { |
4197 | CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers); |
4198 | CLEAR_HARD_REG_SET (implicit_reg_pending_uses); |
4199 | reg_pending_sets = ALLOC_REG_SET (&reg_obstack); |
4200 | reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack); |
4201 | reg_pending_uses = ALLOC_REG_SET (&reg_obstack); |
4202 | reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack); |
4203 | reg_pending_barrier = NOT_A_BARRIER; |
4204 | |
4205 | if (!sel_sched_p () || sched_emulate_haifa_p) |
4206 | { |
4207 | sched_deps_info->start_insn = haifa_start_insn; |
4208 | sched_deps_info->finish_insn = haifa_finish_insn; |
4209 | |
4210 | sched_deps_info->note_reg_set = haifa_note_reg_set; |
4211 | sched_deps_info->note_reg_clobber = haifa_note_reg_clobber; |
4212 | sched_deps_info->note_reg_use = haifa_note_reg_use; |
4213 | |
4214 | sched_deps_info->note_mem_dep = haifa_note_mem_dep; |
4215 | sched_deps_info->note_dep = haifa_note_dep; |
4216 | } |
4217 | } |
4218 | |
4219 | /* Free everything used by the dependency analysis code. */ |
4220 | |
4221 | void |
4222 | finish_deps_global (void) |
4223 | { |
4224 | FREE_REG_SET (reg_pending_sets); |
4225 | FREE_REG_SET (reg_pending_clobbers); |
4226 | FREE_REG_SET (reg_pending_uses); |
4227 | FREE_REG_SET (reg_pending_control_uses); |
4228 | } |
4229 | |
4230 | /* Estimate the weakness of dependence between MEM1 and MEM2. */ |
4231 | dw_t |
4232 | estimate_dep_weak (rtx mem1, rtx mem2) |
4233 | { |
4234 | if (mem1 == mem2) |
4235 | /* MEMs are the same - don't speculate. */ |
4236 | return MIN_DEP_WEAK; |
4237 | |
4238 | rtx r1 = XEXP (mem1, 0); |
4239 | rtx r2 = XEXP (mem2, 0); |
4240 | |
4241 | if (sched_deps_info->use_cselib) |
4242 | { |
4243 | /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be |
4244 | dangling at this point, since we never preserve them. Instead we |
4245 | canonicalize manually to get stable VALUEs out of hashing. */ |
4246 | if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1)) |
4247 | r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx; |
4248 | if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2)) |
4249 | r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx; |
4250 | } |
4251 | |
4252 | if (r1 == r2 |
4253 | || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2))) |
4254 | /* Again, MEMs are the same. */ |
4255 | return MIN_DEP_WEAK; |
4256 | else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2))) |
4257 | /* Different addressing modes - reason to be more speculative, |
4258 | than usual. */ |
4259 | return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2; |
4260 | else |
4261 | /* We can't say anything about the dependence. */ |
4262 | return UNCERTAIN_DEP_WEAK; |
4263 | } |
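To make the weakness scale above concrete, here is a small standalone C++ sketch that recomputes the constants implied by the macro expansions visible in this listing (a 6-bit weakness field per speculative type); the exact values depend on the ds_t layout in sched-int.h, so treat the numbers as illustrative rather than definitive:

#include <cstdio>

int
main ()
{
  const int bits_per_dep_weak = ((8 * 4) - 8) / 4;        /* 6 on this host */
  const int max_dep_weak = (1 << bits_per_dep_weak) - 2;  /* 62 */
  const int no_dep_weak = max_dep_weak + 1;               /* 63 */
  const int uncertain_dep_weak = max_dep_weak - max_dep_weak / 4; /* 47 */
  /* "Different addressing modes" returns the midpoint between
     "uncertain" and "no dependence".  */
  int mixed = no_dep_weak - (no_dep_weak - uncertain_dep_weak) / 2; /* 55 */
  printf ("uncertain=%d mixed=%d no-dep=%d\n",
          uncertain_dep_weak, mixed, no_dep_weak);
  return 0;
}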
4264 | |
4265 | /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE. |
4266 | This function can handle same INSN and ELEM (INSN == ELEM). |
4267 | It is a convenience wrapper. */ |
4268 | static void |
4269 | add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type) |
4270 | { |
4271 | ds_t ds; |
4272 | bool internal; |
4273 | |
4274 | if (dep_type == REG_DEP_TRUE) |
4275 | ds = DEP_TRUE; |
4276 | else if (dep_type == REG_DEP_OUTPUT) |
4277 | ds = DEP_OUTPUT; |
4278 | else if (dep_type == REG_DEP_CONTROL) |
4279 | ds = DEP_CONTROL; |
4280 | else |
4281 | { |
4282 | gcc_assert (dep_type == REG_DEP_ANTI); |
4283 | ds = DEP_ANTI; |
4284 | } |
4285 | |
4286 | /* When add_dependence is called from inside sched-deps.cc, we expect |
4287 | cur_insn to be non-null. */ |
4288 | internal = cur_insn != NULL; |
4289 | if (internal) |
4290 | gcc_assert (insn == cur_insn); |
4291 | else |
4292 | cur_insn = insn; |
4293 | |
4294 | note_dep (elem, ds); |
4295 | if (!internal) |
4296 | cur_insn = NULL; |
4297 | } |
4298 | |
4299 | /* Return weakness of speculative type TYPE in the dep_status DS, |
4300 | without checking to prevent ICEs on malformed input. */ |
4301 | static dw_t |
4302 | get_dep_weak_1 (ds_t ds, ds_t type) |
4303 | { |
4304 | ds = ds & type; |
4305 | |
4306 | switch (type) |
4307 | { |
4308 | case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break; |
4309 | case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break; |
4310 | case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break; |
4311 | case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break; |
4312 | default: gcc_unreachable (); |
4313 | } |
4314 | |
4315 | return (dw_t) ds; |
4316 | } |
4317 | |
4318 | /* Return weakness of speculative type TYPE in the dep_status DS. */ |
4319 | dw_t |
4320 | get_dep_weak (ds_t ds, ds_t type) |
4321 | { |
4322 | dw_t dw = get_dep_weak_1 (ds, type); |
4323 | |
4324 | gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK); |
4325 | return dw; |
4326 | } |
4327 | |
4328 | /* Return the dep_status, which has the same parameters as DS, except for |
4329 | speculative type TYPE, that will have weakness DW. */ |
4330 | ds_t |
4331 | set_dep_weak (ds_t ds, ds_t type, dw_t dw) |
4332 | { |
4333 | gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK); |
4334 | |
4335 | ds &= ~type; |
4336 | switch (type) |
4337 | { |
4338 | case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break; |
4339 | case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break; |
4340 | case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break; |
4341 | case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break; |
4342 | default: gcc_unreachable (); |
4343 | } |
4344 | return ds; |
4345 | } |
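The two functions above are essentially bit-field accessors: each speculative type owns a fixed-width weakness field inside the status word. The following standalone sketch models the same pack/unpack logic with an assumed 6-bit field and a made-up offset (the real offsets come from sched-int.h):

#include <cassert>
#include <cstdint>

typedef uint64_t ds_model_t;
typedef unsigned dw_model_t;

const int weak_bits = 6;
const ds_model_t weak_mask = ((ds_model_t) 1 << weak_bits) - 1;

dw_model_t
model_get_weak (ds_model_t ds, int offset)
{
  return (dw_model_t) ((ds >> offset) & weak_mask);
}

ds_model_t
model_set_weak (ds_model_t ds, int offset, dw_model_t dw)
{
  ds &= ~(weak_mask << offset);             /* clear the old field */
  return ds | ((ds_model_t) dw << offset);  /* install the new weakness */
}

int
main ()
{
  const int begin_data_offset = 0;          /* illustrative offset only */
  ds_model_t ds = model_set_weak (0, begin_data_offset, 47);
  assert (model_get_weak (ds, begin_data_offset) == 47);
  return 0;
}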
4346 | |
4347 | /* Return the join of two dep_statuses DS1 and DS2. |
4348 | If MAX_P is true then choose the greater probability, |
4349 | otherwise multiply probabilities. |
4350 | This function assumes that both DS1 and DS2 contain speculative bits. */ |
4351 | static ds_t |
4352 | ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p) |
4353 | { |
4354 | ds_t ds, t; |
4355 | |
4356 | gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE)); |
4357 | |
4358 | ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES); |
4359 | |
4360 | t = FIRST_SPEC_TYPE; |
4361 | do |
4362 | { |
4363 | if ((ds1 & t) && !(ds2 & t)) |
4364 | ds |= ds1 & t; |
4365 | else if (!(ds1 & t) && (ds2 & t)) |
4366 | ds |= ds2 & t; |
4367 | else if ((ds1 & t) && (ds2 & t)) |
4368 | { |
4369 | dw_t dw1 = get_dep_weak (ds1, t); |
4370 | dw_t dw2 = get_dep_weak (ds2, t); |
4371 | ds_t dw; |
4372 | |
4373 | if (!max_p) |
4374 | { |
4375 | dw = ((ds_t) dw1) * ((ds_t) dw2); |
4376 | dw /= MAX_DEP_WEAK; |
4377 | if (dw < MIN_DEP_WEAK) |
4378 | dw = MIN_DEP_WEAK; |
4379 | } |
4380 | else |
4381 | { |
4382 | if (dw1 >= dw2) |
4383 | dw = dw1; |
4384 | else |
4385 | dw = dw2; |
4386 | } |
4387 | |
4388 | ds = set_dep_weak (ds, t, (dw_t) dw); |
4389 | } |
4390 | |
4391 | if (t == LAST_SPEC_TYPE) |
4392 | break; |
4393 | t <<= SPEC_TYPE_SHIFT; |
4394 | } |
4395 | while (1); |
4396 | |
4397 | return ds; |
4398 | } |
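A small standalone illustration of the two merge policies above, treating weaknesses as probabilities scaled by MAX_DEP_WEAK (62 in this build, per the expansions); the input weaknesses 40 and 50 are made up:

#include <cstdio>

int
main ()
{
  const long max_dep_weak = 62, min_dep_weak = 1;
  long dw1 = 40, dw2 = 50;
  long multiplied = dw1 * dw2 / max_dep_weak;   /* ~ p1 * p2 -> 32 */
  if (multiplied < min_dep_weak)
    multiplied = min_dep_weak;
  long maximal = dw1 >= dw2 ? dw1 : dw2;        /* max_p variant -> 50 */
  printf ("multiplied=%ld maximal=%ld\n", multiplied, maximal);
  return 0;
}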
4399 | |
4400 | /* Return the join of two dep_statuses DS1 and DS2. |
4401 | This function assumes that both DS1 and DS2 contain speculative bits. */ |
4402 | ds_t |
4403 | ds_merge (ds_t ds1, ds_t ds2) |
4404 | { |
4405 | return ds_merge_1 (ds1, ds2, false); |
4406 | } |
4407 | |
4408 | /* Return the join of two dep_statuses DS1 and DS2. */ |
4409 | ds_t |
4410 | ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2) |
4411 | { |
4412 | ds_t new_status = ds | ds2; |
4413 | |
4414 | if (new_status & SPECULATIVE) |
4415 | { |
4416 | if ((ds && !(ds & SPECULATIVE)) |
4417 | || (ds2 && !(ds2 & SPECULATIVE))) |
4418 | /* Then this dep can't be speculative. */ |
4419 | new_status &= ~SPECULATIVE; |
4420 | else |
4421 | { |
4422 | /* Both are speculative. Merging probabilities. */ |
4423 | if (mem1) |
4424 | { |
4425 | dw_t dw; |
4426 | |
4427 | dw = estimate_dep_weak (mem1, mem2); |
4428 | ds = set_dep_weak (ds, BEGIN_DATA, dw); |
4429 | } |
4430 | |
4431 | if (!ds) |
4432 | new_status = ds2; |
4433 | else if (!ds2) |
4434 | new_status = ds; |
4435 | else |
4436 | new_status = ds_merge (ds2, ds); |
4437 | } |
4438 | } |
4439 | |
4440 | return new_status; |
4441 | } |
4442 | |
4443 | /* Return the join of DS1 and DS2. Use maximum instead of multiplying |
4444 | probabilities. */ |
4445 | ds_t |
4446 | ds_max_merge (ds_t ds1, ds_t ds2) |
4447 | { |
4448 | if (ds1 == 0 && ds2 == 0) |
4449 | return 0; |
4450 | |
4451 | if (ds1 == 0 && ds2 != 0) |
4452 | return ds2; |
4453 | |
4454 | if (ds1 != 0 && ds2 == 0) |
4455 | return ds1; |
4456 | |
4457 | return ds_merge_1 (ds1, ds2, true); |
4458 | } |
4459 | |
4460 | /* Return the probability of speculation success for the speculation |
4461 | status DS. */ |
4462 | dw_t |
4463 | ds_weak (ds_t ds) |
4464 | { |
4465 | ds_t res = 1, dt; |
4466 | int n = 0; |
4467 | |
4468 | dt = FIRST_SPEC_TYPE; |
4469 | do |
4470 | { |
4471 | if (ds & dt) |
4472 | { |
4473 | res *= (ds_t) get_dep_weak (ds, dt); |
4474 | n++; |
4475 | } |
4476 | |
4477 | if (dt == LAST_SPEC_TYPE) |
4478 | break; |
4479 | dt <<= SPEC_TYPE_SHIFT; |
4480 | } |
4481 | while (1); |
4482 | |
4483 | gcc_assert (n); |
4484 | while (--n) |
4485 | res /= MAX_DEP_WEAK; |
4486 | |
4487 | if (res < MIN_DEP_WEAK) |
4488 | res = MIN_DEP_WEAK; |
4489 | |
4490 | gcc_assert (res <= MAX_DEP_WEAK); |
4491 | |
4492 | return (dw_t) res; |
4493 | } |
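The same scaled-probability arithmetic, applied across every speculative type present; a worked example with made-up weaknesses:

/* If a status carries BEGIN_DATA weakness 40 and BEGIN_CONTROL weakness
   50, ds_weak multiplies them and divides once by MAX_DEP_WEAK (one
   division per extra factor): 40 * 50 / 62 = 32, clamped to at least
   MIN_DEP_WEAK.  */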
4494 | |
4495 | /* Return a dep status that contains all speculation types of DS. */ |
4496 | ds_t |
4497 | ds_get_speculation_types (ds_t ds) |
4498 | { |
4499 | if (ds & BEGIN_DATA) |
4500 | ds |= BEGIN_DATA; |
4501 | if (ds & BE_IN_DATA) |
4502 | ds |= BE_IN_DATA; |
4503 | if (ds & BEGIN_CONTROL) |
4504 | ds |= BEGIN_CONTROL; |
4505 | if (ds & BE_IN_CONTROL) |
4506 | ds |= BE_IN_CONTROL; |
4507 | |
4508 | return ds & SPECULATIVE; |
4509 | } |
4510 | |
4511 | /* Return a dep status that contains maximal weakness for each speculation |
4512 | type present in DS. */ |
4513 | ds_t |
4514 | ds_get_max_dep_weak (ds_t ds) |
4515 | { |
4516 | if (ds & BEGIN_DATA) |
4517 | ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK); |
4518 | if (ds & BE_IN_DATA) |
4519 | ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK); |
4520 | if (ds & BEGIN_CONTROL) |
4521 | ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK); |
4522 | if (ds & BE_IN_CONTROL) |
4523 | ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK); |
4524 | |
4525 | return ds; |
4526 | } |
4527 | |
4528 | /* Dump information about the dependence status S. */ |
4529 | static void |
4530 | dump_ds (FILE *f, ds_t s) |
4531 | { |
4532 | fprintf (f, "{"); |
4533 | |
4534 | if (s & BEGIN_DATA) |
4535 | fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA)); |
4536 | if (s & BE_IN_DATA) |
4537 | fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA)); |
4538 | if (s & BEGIN_CONTROL) |
4539 | fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL)); |
4540 | if (s & BE_IN_CONTROL) |
4541 | fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL)); |
4542 | |
4543 | if (s & HARD_DEP) |
4544 | fprintf (f, "HARD_DEP; "); |
4545 | |
4546 | if (s & DEP_TRUE) |
4547 | fprintf (f, "DEP_TRUE; "); |
4548 | if (s & DEP_OUTPUT) |
4549 | fprintf (f, "DEP_OUTPUT; "); |
4550 | if (s & DEP_ANTI) |
4551 | fprintf (f, "DEP_ANTI; "); |
4552 | if (s & DEP_CONTROL) |
4553 | fprintf (f, "DEP_CONTROL; "); |
4554 | |
4555 | fprintf (f, "}"); |
4556 | } |
4557 | |
4558 | DEBUG_FUNCTION void |
4559 | debug_ds (ds_t s) |
4560 | { |
4561 | dump_ds (stderr, s); |
4562 | fprintf (stderr, "\n"); |
4563 | } |
4564 | |
4565 | /* Verify that dependence type and status are consistent. |
4566 | If RELAXED_P is true, then skip dep_weakness checks. */ |
4567 | static void |
4568 | check_dep (dep_t dep, bool relaxed_p) |
4569 | { |
4570 | enum reg_note dt = DEP_TYPE (dep); |
4571 | ds_t ds = DEP_STATUS (dep); |
4572 | |
4573 | gcc_assert (DEP_PRO (dep) != DEP_CON (dep)); |
4574 | |
4575 | if (!(current_sched_info->flags & USE_DEPS_LIST)) |
4576 | { |
4577 | gcc_assert (ds == 0); |
4578 | return; |
4579 | } |
4580 | |
4581 | /* Check that dependence type contains the same bits as the status. */ |
4582 | if (dt == REG_DEP_TRUE) |
4583 | gcc_assert (ds & DEP_TRUE); |
4584 | else if (dt == REG_DEP_OUTPUT) |
4585 | gcc_assert ((ds & DEP_OUTPUT) |
4586 | && !(ds & DEP_TRUE)); |
4587 | else if (dt == REG_DEP_ANTI) |
4588 | gcc_assert ((ds & DEP_ANTI) |
4589 | && !(ds & (DEP_OUTPUT | DEP_TRUE))); |
4590 | else |
4591 | gcc_assert (dt == REG_DEP_CONTROL |
4592 | && (ds & DEP_CONTROL) |
4593 | && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE))); |
4594 | |
4595 | /* HARD_DEP cannot appear in dep_status of a link. */ |
4596 | gcc_assert (!(ds & HARD_DEP)); |
4597 | |
4598 | /* Check that dependence status is set correctly when speculation is not |
4599 | supported. */ |
4600 | if (!sched_deps_info->generate_spec_deps) |
4601 | gcc_assert (!(ds & SPECULATIVE)); |
4602 | else if (ds & SPECULATIVE) |
4603 | { |
4604 | if (!relaxed_p) |
4605 | { |
4606 | ds_t type = FIRST_SPEC_TYPE; |
4607 | |
4608 | /* Check that dependence weakness is in proper range. */ |
4609 | do |
4610 | { |
4611 | if (ds & type) |
4612 | get_dep_weak (ds, type); |
4613 | |
4614 | if (type == LAST_SPEC_TYPE) |
4615 | break; |
4616 | type <<= SPEC_TYPE_SHIFT; |
4617 | } |
4618 | while (1); |
4619 | } |
4620 | |
4621 | if (ds & BEGIN_SPEC) |
4622 | { |
4623 | /* Only true dependence can be data speculative. */ |
4624 | if (ds & BEGIN_DATA) |
4625 | gcc_assert (ds & DEP_TRUE); |
4626 | |
4627 | /* Control dependencies in the insn scheduler are represented by |
4628 | anti-dependencies, therefore only anti dependence can be |
4629 | control speculative. */ |
4630 | if (ds & BEGIN_CONTROL) |
4631 | gcc_assert (ds & DEP_ANTI); |
4632 | } |
4633 | else |
4634 | { |
4635 | /* Subsequent speculations should resolve true dependencies. */ |
4636 | gcc_assert ((ds & DEP_TYPES) == DEP_TRUE); |
4637 | } |
4638 | |
4639 | /* Check that true and anti dependencies can't have other speculative |
4640 | statuses. */ |
4641 | if (ds & DEP_TRUE) |
4642 | gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC)); |
4643 | /* An output dependence can't be speculative at all. */ |
4644 | gcc_assert (!(ds & DEP_OUTPUT)); |
4645 | if (ds & DEP_ANTI) |
4646 | gcc_assert (ds & BEGIN_CONTROL); |
4647 | } |
4648 | } |
4649 | |
4650 | /* The following code discovers opportunities to switch a memory reference |
4651 | and an increment by modifying the address. We ensure that this is done |
4652 | only for dependencies that are only used to show a single register |
4653 | dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory |
4654 | instruction involved is subject to only one dep that can cause a pattern |
4655 | change. |
4656 | |
4657 | When we discover a suitable dependency, we fill in the dep_replacement |
4658 | structure to show how to modify the memory reference. */ |
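A schematic example of the kind of pair this machinery targets (RTL shown informally, register numbers made up):

/* (set (mem (plus (reg 100) (const_int 8))) (reg 101))   ; the mem insn
   (set (reg 100) (plus (reg 100) (const_int 16)))        ; the inc insn

   If the dependence between them is the only thing keeping them ordered,
   the memory reference can be rewritten against the incremented register,
   here (mem (plus (reg 100) (const_int -8))), letting the scheduler place
   the increment first.  */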
4659 | |
4660 | /* Holds information about a pair of memory reference and register increment |
4661 | insns which depend on each other, but could possibly be interchanged. */ |
4662 | struct mem_inc_info |
4663 | { |
4664 | rtx_insn *inc_insn; |
4665 | rtx_insn *mem_insn; |
4666 | |
4667 | rtx *mem_loc; |
4668 | /* A register occurring in the memory address for which we wish to break |
4669 | the dependence. This must be identical to the destination register of |
4670 | the increment. */ |
4671 | rtx mem_reg0; |
4672 | /* Any kind of index that is added to that register. */ |
4673 | rtx mem_index; |
4674 | /* The constant offset used in the memory address. */ |
4675 | HOST_WIDE_INT mem_constant; |
4676 | /* The constant added in the increment insn. Negated if the increment is |
4677 | after the memory address. */ |
4678 | HOST_WIDE_INT inc_constant; |
4679 | /* The source register used in the increment. May be different from mem_reg0 |
4680 | if the increment occurs before the memory address. */ |
4681 | rtx inc_input; |
4682 | }; |
4683 | |
4684 | /* Verify that the memory location described in MII can be replaced with |
4685 | one using NEW_ADDR. Return the new memory reference or NULL_RTX. The |
4686 | insn remains unchanged by this function. */ |
4687 | |
4688 | static rtx |
4689 | attempt_change (struct mem_inc_info *mii, rtx new_addr) |
4690 | { |
4691 | rtx mem = *mii->mem_loc; |
4692 | rtx new_mem; |
4693 | |
4694 | if (!targetm.new_address_profitable_p (mem, mii->mem_insn, new_addr)) |
4695 | return NULL_RTX; |
4696 | |
4697 | /* Jump through a lot of hoops to keep the attributes up to date. We |
4698 | do not want to call one of the change address variants that take |
4699 | an offset even though we know the offset in many cases. These |
4700 | assume you are changing where the address is pointing by the |
4701 | offset. */ |
4702 | new_mem = replace_equiv_address_nv (mem, new_addr); |
4703 | if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0)) |
4704 | { |
4705 | if (sched_verbose >= 5) |
4706 | fprintf (sched_dump, "validation failure\n"); |
4707 | return NULL_RTX; |
4708 | } |
4709 | |
4710 | /* Put back the old one. */ |
4711 | validate_change (mii->mem_insn, mii->mem_loc, mem, 0); |
4712 | |
4713 | return new_mem; |
4714 | } |
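The shape of attempt_change is tentatively install the new address, test it, then always restore the original so the caller decides when to commit. A rough standalone sketch of that try-then-revert idiom; the names below are illustrative and not GCC APIs:

#include <optional>
#include <string>

bool
acceptable (const std::string &s)
{
  return s.find ("bad") == std::string::npos;
}

std::optional<std::string>
try_replacement (std::string &slot, const std::string &replacement)
{
  std::string saved = slot;
  slot = replacement;        /* tentatively apply the change */
  bool ok = acceptable (slot);
  slot = saved;              /* always put the original back */
  if (!ok)
    return std::nullopt;
  return replacement;        /* caller commits later if it wants to */
}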
4715 | |
4716 | /* Return true if INSN is of a form "a = b op c" where a and b are |
4717 | regs. op is + if c is a reg and +|- if c is a const. Fill in |
4718 | information in MII about what is found. |
4719 | BEFORE_MEM indicates whether the increment is found before or after |
4720 | a corresponding memory reference. */ |
4721 | |
4722 | static bool |
4723 | parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem) |
4724 | { |
4725 | rtx pat = single_set (insn); |
4726 | rtx src, cst; |
4727 | bool regs_equal; |
4728 | |
4729 | if (RTX_FRAME_RELATED_P (insn) || !pat) |
4730 | return false; |
4731 | |
4732 | /* Do not allow breaking data dependencies for insns that are marked |
4733 | with REG_STACK_CHECK. */ |
4734 | if (find_reg_note (insn, REG_STACK_CHECK, NULL)) |
4735 | return false; |
4736 | |
4737 | /* Result must be single reg. */ |
4738 | if (!REG_P (SET_DEST (pat))) |
4739 | return false; |
4740 | |
4741 | if (GET_CODE (SET_SRC (pat)) != PLUS) |
4742 | return false; |
4743 | |
4744 | mii->inc_insn = insn; |
4745 | src = SET_SRC (pat); |
4746 | mii->inc_input = XEXP (src, 0); |
4747 | |
4748 | if (!REG_P (XEXP (src, 0))) |
4749 | return false; |
4750 | |
4751 | if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0)) |
4752 | return false; |
4753 | |
4754 | cst = XEXP (src, 1); |
4755 | if (!CONST_INT_P (cst)) |
4756 | return false; |
4757 | mii->inc_constant = INTVAL (cst); |
4758 | |
4759 | regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0); |
4760 | |
4761 | if (!before_mem) |
4762 | { |
4763 | mii->inc_constant = -mii->inc_constant; |
4764 | if (!regs_equal) |
4765 | return false; |
4766 | } |
4767 | |
4768 | if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM) |
4769 | { |
4770 | /* Note that the sign has already been reversed for !before_mem. */ |
4771 | if (STACK_GROWS_DOWNWARD) |
4772 | return mii->inc_constant > 0; |
4773 | else |
4774 | return mii->inc_constant < 0; |
4775 | } |
4776 | return true; |
4777 | } |
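Some concrete forms the predicate above accepts or rejects (schematic RTL with made-up register numbers):

/* (set (reg 100) (plus (reg 100) (const_int 4)))
      -- accepted; inc_constant is 4 (negated if the increment follows
         the memory reference).
   (set (reg 100) (plus (reg 101) (const_int 4)))
      -- accepted only when the increment precedes the memory reference,
         since regs_equal is false in that case.
   (set (reg 100) (plus (reg 100) (reg 102)))
      -- rejected by the CONST_INT_P check on the addend.
   Stack-pointer updates are additionally filtered by the
   STACK_GROWS_DOWNWARD test above.  */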
4778 | |
4779 | /* Once a suitable mem reference has been found and the corresponding data |
4780 | in MII has been filled in, this function is called to find a suitable |
4781 | add or inc insn involving the register we found in the memory |
4782 | reference. */ |
4783 | |
4784 | static bool |
4785 | find_inc (struct mem_inc_info *mii, bool backwards) |
4786 | { |
4787 | sd_iterator_def sd_it; |
4788 | dep_t dep; |
4789 | |
4790 | sd_it = sd_iterator_start (mii->mem_insn, |
4791 | backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW); |
4792 | while (sd_iterator_cond (&sd_it, &dep)) |
4793 | { |
4794 | dep_node_t node = DEP_LINK_NODE (*sd_it.linkp); |
4795 | rtx_insn *pro = DEP_PRO (dep); |
4796 | rtx_insn *con = DEP_CON (dep); |
4797 | rtx_insn *inc_cand = backwards ? pro : con; |
4798 | if (DEP_NONREG (dep) || DEP_MULTIPLE (dep)) |
4799 | goto next; |
4800 | if (parse_add_or_inc (mii, inc_cand, backwards)) |
4801 | { |
4802 | struct dep_replacement *desc; |
4803 | df_ref def; |
4804 | rtx newaddr, newmem; |
4805 | |
4806 | if (sched_verbose >= 5) |
4807 | fprintf (sched_dump, "candidate mem/inc pair: %d %d\n", |
4808 | INSN_UID (mii->mem_insn), INSN_UID (inc_cand)); |
4809 | |
4810 | /* Need to assure that none of the operands of the inc |
4811 | instruction are assigned to by the mem insn. */ |
4812 | FOR_EACH_INSN_DEF (def, mii->mem_insn) |
4813 | if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input) |
4814 | || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0)) |
4815 | { |
4816 | if (sched_verbose >= 5) |
4817 | fprintf (sched_dump, |
4818 | "inc conflicts with store failure.\n"); |
4819 | goto next; |
4820 | } |
4821 | |
4822 | newaddr = mii->inc_input; |
4823 | if (mii->mem_index != NULL_RTX)
4824 | newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4825 | mii->mem_index);
4826 | newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4827 | mii->mem_constant + mii->inc_constant); |
4828 | newmem = attempt_change (mii, newaddr); |
4829 | if (newmem == NULL_RTX)
4830 | goto next; |
4831 | if (sched_verbose >= 5) |
4832 | fprintf (sched_dump, "successful address replacement\n"); |
4833 | desc = XCNEW (struct dep_replacement);
4834 | DEP_REPLACE (dep) = desc;
4835 | desc->loc = mii->mem_loc; |
4836 | desc->newval = newmem; |
4837 | desc->orig = *desc->loc; |
4838 | desc->insn = mii->mem_insn; |
4839 | move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4840 | INSN_SPEC_BACK_DEPS (con));
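     | /* Added note (interpretation, not original source): the link is moved
     |    from the consumer's hard back-deps list to its speculative list,
     |    marking this dependence as one the scheduler may later break by
     |    applying the recorded replacement.  */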
4841 | if (backwards) |
4842 | { |
4843 | FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4844 | add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4845 | REG_DEP_TRUE); |
4846 | } |
4847 | else |
4848 | { |
4849 | FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4850 | add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4851 | REG_DEP_ANTI); |
4852 | } |
4853 | return true; |
4854 | } |
4855 | next: |
4856 | sd_iterator_next (&sd_it); |
4857 | } |
4858 | return false; |
4859 | } |
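     | /* Illustrative example (added, not from the original source): for a
     |    load "r2 = *(r1 + 8)" followed by "r1 = r1 + 4", inc_constant is
     |    negated to -4 and the rewritten address is r1 + (8 + -4) = r1 + 4,
     |    which is what the load must use if it is scheduled after the add.
     |    In the backwards case the add comes first and the new address is
     |    built from the add's source register, so the load can instead be
     |    moved above the add.  */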
4860 | |
4861 | /* A recursive function that walks ADDRESS_OF_X to find memory references |
4862 | which could be modified during scheduling. We call find_inc for each |
4863 | one we find that has a recognizable form. MII holds information about |
4864 | the pair of memory/increment instructions. |
4865 | We ensure that every instruction with a memory reference (which will be |
4866 | the location of the replacement) is assigned at most one breakable |
4867 | dependency. */ |
4868 | |
4869 | static bool |
4870 | find_mem (struct mem_inc_info *mii, rtx *address_of_x) |
4871 | { |
4872 | rtx x = *address_of_x; |
4873 | enum rtx_code code = GET_CODE (x);
4874 | const char *const fmt = GET_RTX_FORMAT (code);
4875 | int i; |
4876 | |
4877 | if (code == MEM) |
4878 | { |
4879 | rtx reg0 = XEXP (x, 0);
4880 | |
4881 | mii->mem_loc = address_of_x; |
4882 | mii->mem_index = NULL_RTX;
4883 | mii->mem_constant = 0; |
4884 | if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4885 | {
4886 | mii->mem_constant = INTVAL (XEXP (reg0, 1));
4887 | reg0 = XEXP (reg0, 0);
4888 | } |
4889 | if (GET_CODE (reg0) == PLUS)
4890 | {
4891 | mii->mem_index = XEXP (reg0, 1);
4892 | reg0 = XEXP (reg0, 0);
4893 | } |
4894 | if (REG_P (reg0))
4895 | { |
4896 | df_ref use; |
4897 | int occurrences = 0; |
4898 | |
4899 | /* Make sure this reg appears only once in this insn. Can't use |
4900 | count_occurrences since that only works for pseudos. */ |
4901 | FOR_EACH_INSN_USE (use, mii->mem_insn)
4902 | if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4903 | if (++occurrences > 1) |
4904 | { |
4905 | if (sched_verbose >= 5) |
4906 | fprintf (sched_dump, "mem count failure\n"); |
4907 | return false; |
4908 | } |
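     | /* Added note (interpretation): the replacement rewrites only the
     |    single location recorded in mii->mem_loc, so any other occurrence
     |    of the address register in this insn would still need the original
     |    value and the dependence could not be broken safely.  */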
4909 | |
4910 | mii->mem_reg0 = reg0; |
4911 | return find_inc (mii, true) || find_inc (mii, false); |
4912 | } |
4913 | return false; |
4914 | } |
4915 | |
4916 | if (code == SIGN_EXTRACT || code == ZERO_EXTRACT) |
4917 | { |
4918 | /* If REG occurs inside a MEM used in a bit-field reference, |
4919 | that is unacceptable. */ |
4920 | return false; |
4921 | } |
4922 | |
4923 | /* Time for some deep diving. */ |
4924 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4925 | { |
4926 | if (fmt[i] == 'e') |
4927 | { |
4928 | if (find_mem (mii, &XEXP (x, i)))
4929 | return true; |
4930 | } |
4931 | else if (fmt[i] == 'E') |
4932 | { |
4933 | int j; |
4934 | for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4935 | if (find_mem (mii, &XVECEXP (x, i, j)))
4936 | return true; |
4937 | } |
4938 | } |
4939 | return false; |
4940 | } |
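     | /* Added annotation (not part of the original source): the addresses
     |    decomposed above have the general shape
     |
     |        (mem (plus (plus (reg R) (reg I)) (const_int C)))
     |
     |    where the index register I and the constant C are each optional,
     |    giving mem_reg0 = R, mem_index = I or NULL_RTX, and mem_constant =
     |    C or 0.  */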
4941 | |
4942 | |
4943 | /* Examine the instructions between HEAD and TAIL and try to find |
4944 | dependencies that can be broken by modifying one of the patterns. */ |
4945 | |
4946 | void |
4947 | find_modifiable_mems (rtx_insn *head, rtx_insn *tail) |
4948 | { |
4949 | rtx_insn *insn, *next_tail = NEXT_INSN (tail); |
4950 | int success_in_block = 0; |
4951 | |
4952 | for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) |
4953 | { |
4954 | struct mem_inc_info mii; |
4955 | |
4956 | if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4957 | continue; |
4958 | |
4959 | mii.mem_insn = insn; |
4960 | if (find_mem (&mii, &PATTERN (insn))) |
4961 | success_in_block++; |
4962 | } |
4963 | if (success_in_block && sched_verbose >= 5) |
4964 | fprintf (sched_dump, "%d candidates for address modification found.\n", |
4965 | success_in_block); |
4966 | } |
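     | /* Illustrative example (added, not from the original source): a
     |    breakable dependence found by this pass is, e.g.,
     |
     |        insn 1:  r1 = r1 + 4
     |        insn 2:  r2 = *r1
     |
     |    where insn 2 has a hard dependence on insn 1; once the replacement
     |    "*(r1 + 4)" is recorded, the scheduler is free to move insn 2 above
     |    insn 1 if that yields a better schedule.  */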
4967 | |
4968 | #endif /* INSN_SCHEDULING */ |