File: build/gcc/tree-ssa-threadedge.cc
Warning: line 1387, column 6: 2nd function call argument is an uninitialized value
/* SSA Jump Threading
   Copyright (C) 2005-2023 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "gimple-fold.h"
#include "cfganal.h"
#include "alloc-pool.h"
#include "vr-values.h"
#include "gimple-range.h"
#include "gimple-range-path.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1, true);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

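/* Construct a jump threader that uses SIMPLIFIER for pass-specific
   simplifications and records path-sensitive context in STATE.  */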
jump_threader::jump_threader (jt_simplifier *simplifier, jt_state *state)
{
  /* Initialize the per SSA_NAME value-handles array.  */
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);

  dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
				  integer_zero_node, NULL, NULL);

  m_registry = new fwd_jt_path_registry ();
  m_simplifier = simplifier;
  m_state = state;
}

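/* Release the resources owned by the threader: the shared SSA_NAME value
   array, the dummy condition and the path registry.  */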
jump_threader::~jump_threader (void)
{
  ssa_name_values.release ();
  ggc_free (dummy_cond);
  delete m_registry;
}

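/* Forward to the path registry: drop any recorded jump-thread paths that
   include edge E.  */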
void
jump_threader::remove_jump_threads_including (edge_def *e)
{
  m_registry->remove_jump_threads_including (e);
}

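/* Realize all the jump threads registered so far.  MAY_PEEL_LOOP_HEADERS
   controls whether loop headers may be copied while threading.  Return
   TRUE if anything was threaded.  */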
bool
jump_threader::thread_through_all_blocks (bool may_peel_loop_headers)
{
  return m_registry->thread_through_all_blocks (may_peel_loop_headers);
}

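/* Return TRUE if block BB contains at least one PHI node.  */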
static inline bool
has_phis_p (basic_block bb)
{
  return !gsi_end_p (gsi_start_phis (bb));
}

/* Return TRUE for a block with PHIs but no statements.  */

static bool
empty_block_with_phis_p (basic_block bb)
{
  return gsi_end_p (gsi_start_nondebug_bb (bb)) && has_phis_p (bb);
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

static bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (empty_block_with_phis_p (bb))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */
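/* As an illustrative sketch (not taken from this file): if E is the edge
   2->3 and block 3 begins with

     x_5 = PHI <7 (2), y_9 (4)>

   then traversing E lets us record the temporary, context-sensitive
   equivalence x_5 == 7 for use when simplifying later statements.  */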

bool
jump_threader::record_temporary_equivalences_from_phis (edge e)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we cannot thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      m_state->register_equiv (dst, src, /*update_range=*/true);
    }
  return true;
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
threadedge_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
	return tem;
    }
  return t;
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Uses M_SIMPLIFIER to further simplify statements using pass specific
   information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

gimple *
jump_threader::record_temporary_equivalences_from_stmts_at_dest (edge e)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = param_max_jump_thread_duplication_stmts;

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 cannot thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
	return NULL;

      /* If the statement is a unique builtin, we cannot thread
	 through here.  */
      if (gimple_code (stmt) == GIMPLE_CALL
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_unique_p (stmt))
	return NULL;

      /* We cannot thread through __builtin_constant_p, because an
	 expression that is constant on two threading paths may become
	 non-constant (i.e.: phi) when they merge.  */
      if (gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	{
	  /* If any of the stmts in the PATH's dests are going to be
	     killed due to threading, grow the max count
	     accordingly.  */
	  if (max_stmt_count
	      == param_max_jump_thread_duplication_stmts)
	    {
	      max_stmt_count += estimate_threading_killed_stmts (e->dest);
	      if (dump_file)
		fprintf (dump_file, "threading bb %i up to %i stmts\n",
			 e->dest->index, max_stmt_count);
	    }
	  /* If we're still past the limit, we're done.  */
	  if (stmt_count > max_stmt_count)
	    return NULL;
	}

      m_state->record_ranges_from_stmt (stmt, true);

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      m_state->register_equivs_stmt (stmt, e->src, m_simplifier);
    }
  return stmt;
}

/* Simplify the control statement at the end of the block E->dest.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  When simplifying a GIMPLE_SWITCH, we may return
   the CASE_LABEL_EXPR that will be taken.  */

tree
jump_threader::simplify_control_stmt_condition (edge e, gimple *stmt)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}

      const unsigned recursion_limit = 4;

      cached_lhs
	= simplify_control_stmt_condition_1 (e, stmt, op0, cond_code, op1,
					     recursion_limit);

      /* If we were testing an integer/pointer against a constant,
	 then we can trace the value of the SSA_NAME.  If a value is
	 found, then the condition will collapse to a constant.

	 Return the SSA_NAME we want to trace back rather than the full
	 expression and give the threader a chance to find its value.  */
      if (cached_lhs == NULL)
	{
	  /* Recover the original operands.  They may have been simplified
	     using context sensitive equivalences.  Those context sensitive
	     equivalences may not be valid on paths.  */
	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);

	  if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
	       || POINTER_TYPE_P (TREE_TYPE (op0)))
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    return op0;
	}

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (cached_lhs) == SSA_NAME
		  && SSA_NAME_VALUE (cached_lhs))
		cached_lhs = SSA_NAME_VALUE (cached_lhs);
	      else
		break;
	    }
	}

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	{
	  if (code == GIMPLE_SWITCH)
	    {
	      /* Replace the index operand of the GIMPLE_SWITCH with any LHS
		 we found before handing off to VRP.  If simplification is
		 possible, the simplified value will be a CASE_LABEL_EXPR of
		 the label that is proven to be taken.  */
	      gswitch *dummy_switch = as_a<gswitch *> (gimple_copy (stmt));
	      gimple_switch_set_index (dummy_switch, cached_lhs);
	      cached_lhs = m_simplifier->simplify (dummy_switch, stmt, e->src,
						   m_state);
	      ggc_free (dummy_switch);
	    }
	  else
	    cached_lhs = m_simplifier->simplify (stmt, stmt, e->src, m_state);
	}

      /* We couldn't find an invariant.  But, callers of this
	 function may be able to do something useful with the
	 unmodified destination.  */
      if (!cached_lhs)
	cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

/* Recursive helper for simplify_control_stmt_condition.  */
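/* For example (an illustrative sketch): given a condition of the form
   (a_1 & b_2) != 0, recursing on a_1 with the test a_1 != 0 may show that
   a_1 is zero along edge E, in which case the whole condition is known to
   be false on that path without examining b_2.  */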

tree
jump_threader::simplify_control_stmt_condition_1
	(edge e,
	 gimple *stmt,
	 tree op0,
	 enum tree_code cond_code,
	 tree op1,
	 unsigned limit)
{
  if (limit == 0)
    return NULL_TREE;

  /* We may need to canonicalize the comparison.  For
     example, op0 might be a constant while op1 is an
     SSA_NAME.  Failure to canonicalize will cause us to
     miss threading opportunities.  */
  if (tree_swap_operands_p (op0, op1))
    {
      cond_code = swap_tree_comparison (cond_code);
      std::swap (op0, op1);
    }

  /* If the condition has the form (A & B) CMP 0 or (A | B) CMP 0 then
     recurse into the LHS to see if there is a simplification that
     makes this condition always true or always false along the edge
     E.  */
  if ((cond_code == EQ_EXPR || cond_code == NE_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && integer_zerop (op1))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
	;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR
	       || gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
	{
	  enum tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
	  const tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  const tree rhs2 = gimple_assign_rhs2 (def_stmt);

	  /* Is A != 0 ?  */
	  const tree res1
	    = simplify_control_stmt_condition_1 (e, def_stmt,
						 rhs1, NE_EXPR, op1,
						 limit - 1);
	  if (res1 == NULL_TREE)
	    ;
	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res1))
	    {
	      /* If A == 0 then (A & B) != 0 is always false.  */
	      if (cond_code == NE_EXPR)
		return boolean_false_node;
	      /* If A == 0 then (A & B) == 0 is always true.  */
	      if (cond_code == EQ_EXPR)
		return boolean_true_node;
	    }
	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res1))
	    {
	      /* If A != 0 then (A | B) != 0 is always true.  */
	      if (cond_code == NE_EXPR)
		return boolean_true_node;
	      /* If A != 0 then (A | B) == 0 is always false.  */
	      if (cond_code == EQ_EXPR)
		return boolean_false_node;
	    }

	  /* Is B != 0 ?  */
	  const tree res2
	    = simplify_control_stmt_condition_1 (e, def_stmt,
						 rhs2, NE_EXPR, op1,
						 limit - 1);
	  if (res2 == NULL_TREE)
	    ;
	  else if (rhs_code == BIT_AND_EXPR && integer_zerop (res2))
	    {
	      /* If B == 0 then (A & B) != 0 is always false.  */
	      if (cond_code == NE_EXPR)
		return boolean_false_node;
	      /* If B == 0 then (A & B) == 0 is always true.  */
	      if (cond_code == EQ_EXPR)
		return boolean_true_node;
	    }
	  else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res2))
	    {
	      /* If B != 0 then (A | B) != 0 is always true.  */
	      if (cond_code == NE_EXPR)
		return boolean_true_node;
	      /* If B != 0 then (A | B) == 0 is always false.  */
	      if (cond_code == EQ_EXPR)
		return boolean_false_node;
	    }

	  if (res1 != NULL_TREE && res2 != NULL_TREE)
	    {
	      if (rhs_code == BIT_AND_EXPR
		  && TYPE_PRECISION (TREE_TYPE (op0)) == 1
		  && integer_nonzerop (res1)
		  && integer_nonzerop (res2))
		{
		  /* If A != 0 and B != 0 then (bool)(A & B) != 0 is true.  */
		  if (cond_code == NE_EXPR)
		    return boolean_true_node;
		  /* If A != 0 and B != 0 then (bool)(A & B) == 0 is false.  */
		  if (cond_code == EQ_EXPR)
		    return boolean_false_node;
		}

	      if (rhs_code == BIT_IOR_EXPR
		  && integer_zerop (res1)
		  && integer_zerop (res2))
		{
		  /* If A == 0 and B == 0 then (A | B) != 0 is false.  */
		  if (cond_code == NE_EXPR)
		    return boolean_false_node;
		  /* If A == 0 and B == 0 then (A | B) == 0 is true.  */
		  if (cond_code == EQ_EXPR)
		    return boolean_true_node;
		}
	    }
	}
      /* Handle (A CMP B) CMP 0.  */
      else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
	       == tcc_comparison)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  tree rhs2 = gimple_assign_rhs2 (def_stmt);

	  tree_code new_cond = gimple_assign_rhs_code (def_stmt);
	  if (cond_code == EQ_EXPR)
	    new_cond = invert_tree_comparison (new_cond, false);

	  tree res
	    = simplify_control_stmt_condition_1 (e, def_stmt,
						 rhs1, new_cond, rhs2,
						 limit - 1);
	  if (res != NULL_TREE && is_gimple_min_invariant (res))
	    return res;
	}
    }

  gimple_cond_set_code (dummy_cond, cond_code);
  gimple_cond_set_lhs (dummy_cond, op0);
  gimple_cond_set_rhs (dummy_cond, op1);

  /* We absolutely do not care about any type conversions; we only
     care about a zero/nonzero value.  */
  fold_defer_overflow_warnings ();

  tree res = fold_binary (cond_code, boolean_type_node, op0, op1);
  if (res)
    while (CONVERT_EXPR_P (res))
      res = TREE_OPERAND (res, 0);

  fold_undefer_overflow_warnings ((res && is_gimple_min_invariant (res)),
				  stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

  /* If we have not simplified the condition down to an invariant,
     then use the pass specific callback to simplify the condition.  */
  if (!res
      || !is_gimple_min_invariant (res))
    res = m_simplifier->simplify (dummy_cond, stmt, e->src, m_state);

  return res;
}

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_BIND_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else if (gimple_debug_nonbind_marker_p (stmt))
	    continue;
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts.  We
	     want to copy binds for all modified variables, so that we
	     retain a bind to the shared def if there is one, or to a
	     newly introduced PHI node if there is one.  Our bind will
	     end up reset if the value is dead, but that implies the
	     variable couldn't have survived, so it's fine.  We are
	     not actually running the code that performed the binds at
	     this point, we're just adding binds so that they survive
	     the new confluence, so markers should not be copied.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;
	      else if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects
   (i.e., it need not be duplicated as part of the CFG/SSA updating
   process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.  */
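/* For example (an illustrative sketch): a block whose only statement is a
   conditional whose outcome is already known along PATH, or a block with
   no statements and a single successor, can be appended to PATH without
   being duplicated.  */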

bool
jump_threader::thread_around_empty_blocks (vec<jump_thread_edge *> *path,
					   edge taken_edge,
					   bitmap visited)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they cannot have visible side effects such
     as PHI nodes.  */
  if (has_phis_p (bb))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);

	  if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
	    return false;

	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      m_registry->push_edge (path, taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      m_state->append_path (taken_edge->dest);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      return thread_around_empty_blocks (path, taken_edge, visited);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond != NULL_TREE
      && (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR))
    {
      if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	taken_edge = find_edge (bb, label_to_block (cfun, CASE_LABEL (cond)));
      else
	taken_edge = find_taken_edge (bb, cond);

      if (!taken_edge
	  || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	return false;

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      m_registry->push_edge (path, taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      m_state->append_path (taken_edge->dest);

      thread_around_empty_blocks (path, taken_edge, visited);
      return true;
    }

  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   A positive return value is success.  A zero return value is failure,
   but the block can still be duplicated as a joiner in a jump thread
   path; a negative value indicates the block should not be duplicated
   and thus is not suitable for a joiner in a jump threading path.  */

int
jump_threader::thread_through_normal_block (vec<jump_thread_edge *> *path,
					    edge e, bitmap visited)
{
  m_state->register_equivs_edge (e);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt = record_temporary_equivalences_from_stmts_at_dest (e);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any statements, but
	 does have PHIs.  */
      if (empty_block_with_phis_p (e->dest))
	return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt);

      if (!cond)
	return 0;

      if (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR)
	{
	  edge taken_edge;
	  if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	    taken_edge = find_edge (e->dest,
				    label_to_block (cfun, CASE_LABEL (cond)));
	  else
	    taken_edge = find_taken_edge (e->dest, cond);

	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || (taken_edge->flags & EDGE_DFS_BACK) != 0
	      || bitmap_bit_p (visited, dest->index))
	    return 0;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
	     the first edge on the path.  */
	  if (path->length () == 0)
	    m_registry->push_edge (path, e, EDGE_START_JUMP_THREAD);

	  m_registry->push_edge (path, taken_edge, EDGE_COPY_SRC_BLOCK);
	  m_state->append_path (taken_edge->dest);

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (path, taken_edge, visited);
	  return 1;
	}
    }
  return 0;
}

/* There are basic blocks that look like:
   <P0>
   p0 = a CMP b ; or p0 = (INT) (a CMP b)
   goto <X>;

   <P1>
   p1 = c CMP d
   goto <X>;

   <X>
   # phi = PHI <p0 (P0), p1 (P1)>
   if (phi != 0) goto <Y>; else goto <Z>;

   Then, edge (P0,X) or (P1,X) could be marked as EDGE_START_JUMP_THREAD,
   and edges (X,Y) and (X,Z) as EDGE_COPY_SRC_JOINER_BLOCK.

   Return true if E is (P0,X) or (P1,X).  */

bool
edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
{
  /* See if there is only one stmt which is gcond.  */
  gcond *gs;
  if (!(gs = safe_dyn_cast<gcond *> (last_and_only_stmt (e->dest))))
    return false;

  /* See if gcond's cond is "(phi !=/== 0/1)" in the basic block.  */
  tree cond = gimple_cond_lhs (gs);
  enum tree_code code = gimple_cond_code (gs);
  tree rhs = gimple_cond_rhs (gs);
  if (TREE_CODE (cond) != SSA_NAME
      || (code != NE_EXPR && code != EQ_EXPR)
      || (!integer_onep (rhs) && !integer_zerop (rhs)))
    return false;
  gphi *phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (cond));
  if (phi == NULL || gimple_bb (phi) != e->dest)
    return false;

  /* Check if phi's incoming value is CMP.  */
  gassign *def;
  tree value = PHI_ARG_DEF_FROM_EDGE (phi, e);
  if (TREE_CODE (value) != SSA_NAME
      || !has_single_use (value)
      || !(def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (value))))
    return false;

  /* Or if it is (INT) (a CMP b).  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
    {
      value = gimple_assign_rhs1 (def);
      if (TREE_CODE (value) != SSA_NAME
	  || !has_single_use (value)
	  || !(def = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (value))))
	return false;
    }

  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
    return false;

  return true;
}

/* We are exiting E->src, see if E->dest ends with a conditional jump
   which has a known value when reached via E.  If so, thread the
   edge.  */

void
jump_threader::thread_across_edge (edge e)
{
  auto_bitmap visited;

  m_state->push (e);

  stmt_count = 0;

  vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);

  int threaded = 0;
  if ((e->flags & EDGE_DFS_BACK) == 0)
    threaded = thread_through_normal_block (path, e, visited);

  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      m_registry->register_jump_thread (path);
      m_state->pop ();
      return;
    }

  gcc_checking_assert (path->length () == 0);
  path->release ();

  if (threaded < 0)
    {
      /* The target block was deemed too big to duplicate.  Just quit
	 now rather than trying to use the block as a joiner in a jump
	 threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      m_state->pop ();
      return;
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back
     out based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_COMPLEX)
	{
	  m_state->pop ();
	  return;
	}

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	if ((e->flags & EDGE_DFS_BACK) != 0
	    || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	  continue;

	m_state->push (taken_edge);

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);

	vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
	m_registry->push_edge (path, e, EDGE_START_JUMP_THREAD);
	m_registry->push_edge (path, taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);

	found = thread_around_empty_blocks (path, taken_edge, visited);

	if (!found)
	  found = thread_through_normal_block (path,
					       path->last ()->e, visited) > 0;

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found
	    || edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (e))
	  {
	    if (taken_edge->dest != path->last ()->e->dest)
	      propagate_threaded_block_debug_into (path->last ()->e->dest,
						   taken_edge->dest);
	    m_registry->register_jump_thread (path);
	  }
	else
	  path->release ();

	m_state->pop ();
      }
  }

  m_state->pop ();
}
1176 | ||||
1177 | /* Return TRUE if BB has a single successor to a block with multiple | |||
1178 | incoming and outgoing edges. */ | |||
1179 | ||||
1180 | bool | |||
1181 | single_succ_to_potentially_threadable_block (basic_block bb) | |||
1182 | { | |||
1183 | int flags = (EDGE_IGNORE | EDGE_COMPLEX(EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE ) | EDGE_ABNORMAL); | |||
1184 | return (single_succ_p (bb) | |||
1185 | && (single_succ_edge (bb)->flags & flags) == 0 | |||
1186 | && potentially_threadable_block (single_succ (bb))); | |||
1187 | } | |||
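/* In other words (illustration only):

       BB --(sole successor edge, none of EDGE_IGNORE, EDGE_COMPLEX or
             EDGE_ABNORMAL set)--> DEST

   where DEST itself looks potentially threadable, i.e. has multiple
   incoming and multiple outgoing edges as described above.  */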
1188 | ||||
1189 | /* Examine the outgoing edges from BB and conditionally | |||
1190 | try to thread them. */ | |||
1191 | ||||
1192 | void | |||
1193 | jump_threader::thread_outgoing_edges (basic_block bb) | |||
1194 | { | |||
1195 | int flags = (EDGE_IGNORE | EDGE_COMPLEX(EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE ) | EDGE_ABNORMAL); | |||
1196 | gimple *last; | |||
1197 | ||||
1198 | if (!flag_thread_jumpsglobal_options.x_flag_thread_jumps) | |||
1199 | return; | |||
1200 | ||||
1201 | /* If we have an outgoing edge to a block with multiple incoming and | |||
1202 | outgoing edges, then we may be able to thread the edge, i.e., we | |||
1203 | may be able to statically determine which of the outgoing edges | |||
1204 | will be traversed when the incoming edge from BB is traversed. */ | |||
1205 | if (single_succ_to_potentially_threadable_block (bb)) | |||
1206 | thread_across_edge (single_succ_edge (bb)); | |||
1207 | else if ((last = last_stmt (bb)) | |||
1208 | && gimple_code (last) == GIMPLE_COND | |||
1209 | && EDGE_COUNT (bb->succs)vec_safe_length (bb->succs) == 2 | |||
1210 | && (EDGE_SUCC (bb, 0)(*(bb)->succs)[(0)]->flags & flags) == 0 | |||
1211 | && (EDGE_SUCC (bb, 1)(*(bb)->succs)[(1)]->flags & flags) == 0) | |||
1212 | { | |||
1213 | edge true_edge, false_edge; | |||
1214 | ||||
1215 | extract_true_false_edges_from_block (bb, &true_edge, &false_edge); | |||
1216 | ||||
1217 | /* Only try to thread the edge if it reaches a target block with | |||
1218 | more than one predecessor and more than one successor. */ | |||
1219 | if (potentially_threadable_block (true_edge->dest)) | |||
1220 | thread_across_edge (true_edge); | |||
1221 | ||||
1222 | /* Similarly for the ELSE arm. */ | |||
1223 | if (potentially_threadable_block (false_edge->dest)) | |||
1224 | thread_across_edge (false_edge); | |||
1225 | } | |||
1226 | } | |||
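/* Usage sketch (hypothetical driver; the real callers are the passes
   that run the forward threader, such as DOM): walk the CFG calling
   thread_outgoing_edges on each block of interest so that candidate
   paths accumulate in the registry, then ask the threader to apply all
   recorded jump threads in one CFG update.  */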
1227 | ||||
1228 | // Marker to keep track of the start of the current path. | |||
1229 | const basic_block jt_state::BB_MARKER = (basic_block) -1; | |||
1230 | ||||
1231 | // Record that E is being crossed. | |||
1232 | ||||
1233 | void | |||
1234 | jt_state::push (edge e) | |||
1235 | { | |||
1236 | m_blocks.safe_push (BB_MARKER); | |||
1237 | if (m_blocks.length () == 1) | |||
1238 | m_blocks.safe_push (e->src); | |||
1239 | m_blocks.safe_push (e->dest); | |||
1240 | } | |||
1241 | ||||
1242 | // Pop to the last pushed state. | |||
1243 | ||||
1244 | void | |||
1245 | jt_state::pop () | |||
1246 | { | |||
1247 | if (!m_blocks.is_empty ()) | |||
1248 | { | |||
1249 | while (m_blocks.last () != BB_MARKER) | |||
1250 | m_blocks.pop (); | |||
1251 | // Pop marker. | |||
1252 | m_blocks.pop (); | |||
1253 | } | |||
1254 | } | |||
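// For example (illustration only): push (A->B) followed by push (B->C)
// leaves m_blocks as { MARKER, A, B, MARKER, C }; a subsequent pop ()
// trims it back to { MARKER, A, B }.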
1255 | ||||
1256 | // Add BB to the list of blocks seen. | |||
1257 | ||||
1258 | void | |||
1259 | jt_state::append_path (basic_block bb) | |||
1260 | { | |||
1261 | gcc_checking_assert (!m_blocks.is_empty ())((void)(!(!m_blocks.is_empty ()) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/tree-ssa-threadedge.cc" , 1261, __FUNCTION__), 0 : 0)); | |||
1262 | m_blocks.safe_push (bb); | |||
1263 | } | |||
1264 | ||||
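// Dump ranger details for the blocks in the current path to OUT.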
1265 | void | |||
1266 | jt_state::dump (FILE *out) | |||
1267 | { | |||
1268 | if (!m_blocks.is_empty ()) | |||
1269 | { | |||
1270 | auto_vec<basic_block> path; | |||
1271 | get_path (path); | |||
1272 | dump_ranger (out, path); | |||
1273 | } | |||
1274 | } | |||
1275 | ||||
1276 | void | |||
1277 | jt_state::debug () | |||
1278 | { | |||
1279 | push_dump_file save (stderrstderr, TDF_DETAILS); | |||
1280 | dump (stderrstderr); | |||
1281 | } | |||
1282 | ||||
1283 | // Convert the current path in jt_state into a path suitable for the | |||
1284 | // path solver. Return the resulting path in PATH. | |||
1285 | ||||
1286 | void | |||
1287 | jt_state::get_path (vec<basic_block> &path) | |||
1288 | { | |||
1289 | path.truncate (0); | |||
1290 | ||||
1291 | for (int i = (int) m_blocks.length () - 1; i >= 0; --i) | |||
1292 | { | |||
1293 | basic_block bb = m_blocks[i]; | |||
1294 | ||||
1295 | if (bb != BB_MARKER) | |||
1296 | path.safe_push (bb); | |||
1297 | } | |||
1298 | } | |||
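// Continuing the illustration above: m_blocks of
// { MARKER, A, B, MARKER, C } yields PATH = { C, B, A }, i.e. the most
// recently crossed block first, so the block containing the final
// conditional ends up at PATH[0] (see compute_exit_dependencies below).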
1299 | ||||
1300 | // Record an equivalence from DEST to SRC.  If UPDATE_RANGE is TRUE, | |||
1301 | // update the value range associated with DEST. | |||
1302 | ||||
1303 | void | |||
1304 | jt_state::register_equiv (tree dest ATTRIBUTE_UNUSED__attribute__ ((__unused__)), | |||
1305 | tree src ATTRIBUTE_UNUSED__attribute__ ((__unused__)), | |||
1306 | bool update_range ATTRIBUTE_UNUSED__attribute__ ((__unused__))) | |||
1307 | { | |||
1308 | } | |||
1309 | ||||
1310 | // Record any ranges calculated in STMT.  If TEMPORARY is TRUE, the | |||
1311 | // recorded ranges are temporary and should be recorded into the | |||
1312 | // unwind table, rather than the global table. | |||
1313 | ||||
1314 | void | |||
1315 | jt_state::record_ranges_from_stmt (gimple *, | |||
1316 | bool temporary ATTRIBUTE_UNUSED__attribute__ ((__unused__))) | |||
1317 | { | |||
1318 | } | |||
1319 | ||||
1320 | // Record any equivalences created by traversing E. | |||
1321 | ||||
1322 | void | |||
1323 | jt_state::register_equivs_edge (edge) | |||
1324 | { | |||
1325 | } | |||
1326 | ||||
1327 | void | |||
1328 | jt_state::register_equivs_stmt (gimple *stmt, basic_block bb, | |||
1329 | jt_simplifier *simplifier) | |||
1330 | { | |||
1331 | /* At this point we have a statement which assigns an RHS to an | |||
1332 | SSA_VAR on the LHS.  We want to try to simplify this statement | |||
1333 | to expose more context-sensitive equivalences which in turn may | |||
1334 | allow us to simplify the condition at the end of the loop. | |||
1335 | ||||
1336 | Handle simple copy operations. */ | |||
1337 | tree cached_lhs = NULLnullptr; | |||
1338 | if (gimple_assign_single_p (stmt) | |||
1339 | && TREE_CODE (gimple_assign_rhs1 (stmt))((enum tree_code) (gimple_assign_rhs1 (stmt))->base.code) == SSA_NAME) | |||
1340 | cached_lhs = gimple_assign_rhs1 (stmt); | |||
1341 | else | |||
1342 | { | |||
1343 | /* A statement that is not a trivial copy. | |||
1344 | Try to fold the new expression. Inserting the | |||
1345 | expression into the hash table is unlikely to help. */ | |||
1346 | /* ??? The DOM callback below can be changed to setting | |||
1347 | the mprts_hook around the call to thread_across_edge, | |||
1348 | avoiding the use substitution. */ | |||
1349 | cached_lhs = gimple_fold_stmt_to_constant_1 (stmt, | |||
1350 | threadedge_valueize); | |||
1351 | if (NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES)num_ssa_operands (stmt, ((0x04) | 0x01)) != 0 | |||
1352 | && (!cached_lhs | |||
1353 | || (TREE_CODE (cached_lhs)((enum tree_code) (cached_lhs)->base.code) != SSA_NAME | |||
1354 | && !is_gimple_min_invariant (cached_lhs)))) | |||
1355 | { | |||
1356 | /* We're going to temporarily copy propagate the operands | |||
1357 | and see if that allows us to simplify this statement. */ | |||
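/* E.g. (hypothetical SSA names): for "c_3 = a_1 < b_2" where a_1 has a
   recorded value of x_7, substituting yields "c_3 = x_7 < b_2", which
   the simplifier call below may be able to resolve from its own
   tables; the original operands are put back right afterwards.  */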
1358 | tree *copy; | |||
1359 | ssa_op_iter iter; | |||
1360 | use_operand_p use_p; | |||
1361 | unsigned int num, i = 0; | |||
1362 | ||||
1363 | num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES)num_ssa_operands (stmt, ((0x04) | 0x01)); | |||
1364 | copy = XALLOCAVEC (tree, num)((tree *) __builtin_alloca(sizeof (tree) * (num))); | |||
1365 | ||||
1366 | /* Make a copy of the uses & vuses into USES_COPY, then cprop into | |||
1367 | the operands. */ | |||
1368 | FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)for (use_p = op_iter_init_use (&(iter), stmt, ((0x04) | 0x01 )); !op_iter_done (&(iter)); use_p = op_iter_next_use (& (iter))) | |||
1369 | { | |||
1370 | tree tmp = NULLnullptr; | |||
1371 | tree use = USE_FROM_PTR (use_p)get_use_from_ptr (use_p); | |||
1372 | ||||
1373 | copy[i++] = use; | |||
1374 | if (TREE_CODE (use)((enum tree_code) (use)->base.code) == SSA_NAME) | |||
1375 | tmp = SSA_NAME_VALUE (use)((tree_check ((use), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/tree-ssa-threadedge.cc" , 1375, __FUNCTION__, (SSA_NAME)))->base.u.version < ssa_name_values .length () ? ssa_name_values[(tree_check ((use), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/tree-ssa-threadedge.cc" , 1375, __FUNCTION__, (SSA_NAME)))->base.u.version] : (tree ) nullptr); | |||
1376 | if (tmp) | |||
1377 | SET_USE (use_p, tmp)set_ssa_use_from_ptr (use_p, tmp); | |||
1378 | } | |||
1379 | ||||
1380 | /* Do not pass state to avoid calling the ranger with the | |||
1381 | temporarily altered IL. */ | |||
1382 | cached_lhs = simplifier->simplify (stmt, stmt, bb, /*state=*/NULLnullptr); | |||
1383 | ||||
1384 | /* Restore the statement's original uses/defs. */ | |||
1385 | i = 0; | |||
1386 | FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)for (use_p = op_iter_init_use (&(iter), stmt, ((0x04) | 0x01 )); !op_iter_done (&(iter)); use_p = op_iter_next_use (& (iter))) | |||
1387 | SET_USE (use_p, copy[i++])set_ssa_use_from_ptr (use_p, copy[i++]); | |||
1388 | } | |||
1389 | } | |||
1390 | ||||
1391 | /* Record the context-sensitive equivalence if we were able | |||
1392 | to simplify this statement. */ | |||
1393 | if (cached_lhs | |||
1394 | && (TREE_CODE (cached_lhs)((enum tree_code) (cached_lhs)->base.code) == SSA_NAME | |||
1395 | || is_gimple_min_invariant (cached_lhs))) | |||
1396 | register_equiv (gimple_get_lhs (stmt), cached_lhs, | |||
1397 | /*update_range=*/false); | |||
1398 | } | |||
1399 | ||||
1400 | // Hybrid threader implementation. | |||
1401 | ||||
1402 | hybrid_jt_simplifier::hybrid_jt_simplifier (gimple_ranger *r, | |||
1403 | path_range_query *q) | |||
1404 | { | |||
1405 | m_ranger = r; | |||
1406 | m_query = q; | |||
1407 | } | |||
1408 | ||||
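// Simplify STMT in the context of the path recorded in STATE: seed the
// path query with that path and its exit dependencies, then try to
// resolve STMT.  Conditionals and assignments are folded with
// range_of_stmt; for switches the range of the index is used to look up
// the corresponding case label.  Return the resulting tree, or NULL if
// nothing could be determined.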
1409 | tree | |||
1410 | hybrid_jt_simplifier::simplify (gimple *stmt, gimple *, basic_block, | |||
1411 | jt_state *state) | |||
1412 | { | |||
1413 | auto_bitmap dependencies; | |||
1414 | auto_vec<basic_block> path; | |||
1415 | ||||
1416 | state->get_path (path); | |||
1417 | compute_exit_dependencies (dependencies, path, stmt); | |||
1418 | m_query->reset_path (path, dependencies); | |||
1419 | ||||
1420 | if (gimple_code (stmt) == GIMPLE_COND | |||
1421 | || gimple_code (stmt) == GIMPLE_ASSIGN) | |||
1422 | { | |||
1423 | Value_Range r (gimple_range_type (stmt)); | |||
1424 | tree ret; | |||
1425 | if (m_query->range_of_stmt (r, stmt) && r.singleton_p (&ret)) | |||
1426 | return ret; | |||
1427 | } | |||
1428 | else if (gimple_code (stmt) == GIMPLE_SWITCH) | |||
1429 | { | |||
1430 | int_range_max r; | |||
1431 | gswitch *switch_stmt = dyn_cast <gswitch *> (stmt); | |||
1432 | tree index = gimple_switch_index (switch_stmt); | |||
1433 | if (m_query->range_of_expr (r, index, stmt)) | |||
1434 | return find_case_label_range (switch_stmt, &r); | |||
1435 | } | |||
1436 | return NULLnullptr; | |||
1437 | } | |||
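// For example (hypothetical IL): if the path's final statement is
// "if (i_4 > 10)" and the path query computes the range of i_4 along
// the recorded path as [11, +INF], range_of_stmt folds the condition to
// the singleton 1, which is returned so the caller can thread the true
// edge.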
1438 | ||||
1439 | // Calculate the set of exit dependencies for a path and statement to | |||
1440 | // be simplified.  This differs from the compute_exit_dependencies in | |||
1441 | // the path solver because the forward threader asks questions about | |||
1442 | // statements that are not necessarily in the path.  Using the path | |||
1443 | // solver's default compute_exit_dependencies finds noticeably fewer | |||
1444 | // threads. | |||
1445 | ||||
1446 | void | |||
1447 | hybrid_jt_simplifier::compute_exit_dependencies (bitmap dependencies, | |||
1448 | const vec<basic_block> &path, | |||
1449 | gimple *stmt) | |||
1450 | { | |||
1451 | gori_compute &gori = m_ranger->gori (); | |||
1452 | ||||
1453 | // Start with the imports to the final conditional. | |||
1454 | bitmap_copy (dependencies, gori.imports (path[0])); | |||
1455 | ||||
1456 | // Add any other interesting operands we may have missed. | |||
1457 | if (gimple_bb (stmt) != path[0]) | |||
1458 | { | |||
1459 | for (unsigned i = 0; i < gimple_num_ops (stmt); ++i) | |||
1460 | { | |||
1461 | tree op = gimple_op (stmt, i); | |||
1462 | if (op | |||
1463 | && TREE_CODE (op)((enum tree_code) (op)->base.code) == SSA_NAME | |||
1464 | && Value_Range::supports_type_p (TREE_TYPE (op)((contains_struct_check ((op), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/tree-ssa-threadedge.cc" , 1464, __FUNCTION__))->typed.type))) | |||
1465 | bitmap_set_bit (dependencies, SSA_NAME_VERSION (op)(tree_check ((op), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/tree-ssa-threadedge.cc" , 1465, __FUNCTION__, (SSA_NAME)))->base.u.version); | |||
1466 | } | |||
1467 | } | |||
1468 | } |
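// Wiring sketch (hypothetical; the real setup lives in the passes that
// drive the forward threader): create a gimple_ranger and a
// path_range_query over it, construct a hybrid_jt_simplifier from those
// two, pair it with a jt_state, and hand both to a jump_threader, which
// calls simplify () as it explores candidate paths.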