Bug Summary

File:build/gcc/wide-int.h
Warning:line 1283, column 3
Undefined or garbage value returned to caller

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-suse-linux -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name gimple-ssa-warn-restrict.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -resource-dir /usr/lib64/clang/15.0.7 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. 
-I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/backward -internal-isystem /usr/lib64/clang/15.0.7/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../x86_64-suse-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2023-03-27-141847-20772-1/report-mN6nK3.plist -x c++ /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc

/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc

1/* Pass to detect and issue warnings for violations of the restrict
2 qualifier.
3 Copyright (C) 2017-2023 Free Software Foundation, Inc.
4 Contributed by Martin Sebor <msebor@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "backend.h"
26#include "tree.h"
27#include "gimple.h"
28#include "tree-pass.h"
29#include "pointer-query.h"
30#include "ssa.h"
31#include "gimple-pretty-print.h"
32#include "gimple-ssa-warn-access.h"
33#include "gimple-ssa-warn-restrict.h"
34#include "diagnostic-core.h"
35#include "fold-const.h"
36#include "gimple-iterator.h"
37#include "tree-dfa.h"
38#include "tree-ssa.h"
39#include "tree-cfg.h"
40#include "tree-object-size.h"
41#include "calls.h"
42#include "cfgloop.h"
43#include "intl.h"
44#include "gimple-range.h"
45
46namespace {
47
/* Static descriptor for the -Wrestrict pass; handed to the gimple_opt_pass
   base class by the pass_wrestrict constructor below.  */
48const pass_data pass_data_wrestrict = {
 49 GIMPLE_PASS,
 50 "wrestrict",
 51 OPTGROUP_NONE,
 52 TV_NONE,
 53 PROP_cfg(1 << 3), /* properties_required. */
 54 0, /* properties_provided. */
 55 0, /* properties_destroyed. */
 56 0, /* properties_start */
 57 0, /* properties_finish */
 58};
59
60/* Pass to detect violations of strict aliasing requirements in calls
61 to built-in string and raw memory functions. */
62class pass_wrestrict : public gimple_opt_pass
63{
 64 public:
 65 pass_wrestrict (gcc::context *);
 66
 67 bool gate (function *) final override;
 68 unsigned int execute (function *) final override;
 69
 /* Check a single call statement for -Wrestrict-style problems.
    Defined elsewhere in this file.  */
 70 void check_call (gimple *);
 71
 /* Walk all statements of a basic block, dispatching calls
    to check_call.  */
 72 void check_block (basic_block);
 73
 74 /* A pointer_query object to store information about pointers and
 75 their targets in. */
 76 pointer_query m_ptr_qry;
 77};
78
/* Construct the pass object; m_ptr_qry starts out with no ranger
   (its rvals member is set in execute).  */
79pass_wrestrict::pass_wrestrict (gcc::context *ctxt)
 80 : gimple_opt_pass (pass_data_wrestrict, ctxt),
 81 m_ptr_qry ()
 82{ }
83
/* Run the pass only when at least one of the diagnostics it can issue
   (-Warray-bounds, -Wrestrict, or -Wstringop-overflow) is enabled.  */
84bool
 85pass_wrestrict::gate (function *fun ATTRIBUTE_UNUSED__attribute__ ((__unused__)))
 86{
 87 return warn_array_boundsglobal_options.x_warn_array_bounds || warn_restrictglobal_options.x_warn_restrict || warn_stringop_overflowglobal_options.x_warn_stringop_overflow;
 88}
89
/* Scan basic block BB and check every call statement in it.
   Non-call statements are skipped.  */
90void
 91pass_wrestrict::check_block (basic_block bb)
 92{
 93 /* Iterate over statements, looking for function calls. */
 94 for (auto si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
 95 {
 96 gimple *stmt = gsi_stmt (si);
 97 if (!is_gimple_call (stmt))
 98 continue;
 99
 100 check_call (stmt);
 101 }
 102}
103
/* Pass entry point: enable a ranger for FUN, check every basic block,
   then tear the ranger down again.  Always returns 0 (no TODO flags).  */
104unsigned
 105pass_wrestrict::execute (function *fun)
 106{
 107 /* Create a new ranger instance and associate it with FUN. */
 108 m_ptr_qry.rvals = enable_ranger (fun);
 109
 110 basic_block bb;
 111 FOR_EACH_BB_FN (bb, fun)for (bb = (fun)->cfg->x_entry_block_ptr->next_bb; bb
 != (fun)->cfg->x_exit_block_ptr; bb = bb->next_bb)
 112 check_block (bb);
 113
 /* Drop cached pointer_query state before the ranger goes away.  */
 114 m_ptr_qry.flush_cache ();
 115
 116 /* Release the ranger instance and replace it with a global ranger.
 117 Also reset the pointer since calling disable_ranger() deletes it. */
 118 disable_ranger (fun);
 119 m_ptr_qry.rvals = NULL__null;
 120
 121 return 0;
 122}
123
124/* Description of a memory reference by a built-in function. This
125 is similar to ao_ref but made especially suitable for -Wrestrict
126 and not for optimization. */
127class builtin_memref
128{
 129public:
 130 /* The original pointer argument to the built-in function. */
 131 tree ptr;
 132 /* The referenced subobject or NULL if not available, and the base
 133 object of the memory reference or NULL. */
 134 tree ref;
 135 tree base;
 136
 137 /* The size of the BASE object, PTRDIFF_MAX if indeterminate,
 138 and negative until (possibly lazily) initialized. */
 139 offset_int basesize;
 140 /* Same for the subobject. */
 141 offset_int refsize;
 142
 143 /* The non-negative offset of the referenced subobject. Used to avoid
 144 warnings for (apparently) possibly but not definitively overlapping
 145 accesses to member arrays. Negative when unknown/invalid. */
 146 offset_int refoff;
 147
 148 /* The offset range relative to the base. */
 149 offset_int offrange[2];
 150 /* The size range of the access to this reference. */
 151 offset_int sizrange[2];
 152
 153 /* Cached result of get_max_objsize(). */
 154 const offset_int maxobjsize;
 155
 156 /* True for "bounded" string functions like strncat, and strncpy
 157 and their variants that specify either an exact or upper bound
 158 on the size of the accesses they perform. For strncat both
 159 the source and destination references are bounded. For strncpy
 160 only the destination reference is. */
 161 bool strbounded_p;
 162
 163 builtin_memref (pointer_query &, gimple *, tree, tree);
 164
 165 tree offset_out_of_bounds (int, offset_int[3]) const;
 166
 167private:
 168 /* Call statement to the built-in. */
 169 gimple *stmt;
 170
 /* Pointer query used to resolve pointer targets and value ranges;
    owned by the pass, referenced here for the ctor helpers.  */
 171 pointer_query &m_ptr_qry;
 172
 173 /* Ctor helper to set or extend OFFRANGE based on argument. */
 174 void extend_offset_range (tree);
 175
 176 /* Ctor helper to determine BASE and OFFRANGE from argument. */
 177 void set_base_and_offset (tree);
 178};
179
180/* Description of a memory access by a raw memory or string built-in
181 function involving a pair of builtin_memref's. */
182class builtin_access
183{
184 public:
185 /* Destination and source memory reference. */
186 builtin_memref* const dstref;
187 builtin_memref* const srcref;
188 /* The size range of the access. It's the greater of the accesses
189 to the two references. */
190 HOST_WIDE_INTlong sizrange[2];
191
192 /* The minimum and maximum offset of an overlap of the access
193 (if it does, in fact, overlap), and the size of the overlap. */
194 HOST_WIDE_INTlong ovloff[2];
195 HOST_WIDE_INTlong ovlsiz[2];
196
197 /* True to consider valid only accesses to the smallest subobject
198 and false for raw memory functions. */
199 bool strict () const
200 {
201 return (detect_overlap != &builtin_access::generic_overlap
202 && detect_overlap != &builtin_access::no_overlap);
203 }
204
205 builtin_access (pointer_query &, gimple *,
206 builtin_memref &, builtin_memref &);
207
208 /* Entry point to determine overlap. */
209 bool overlap ();
210
211 offset_int write_off (tree) const;
212
213 void dump (FILE *) const;
214
215 private:
216 /* Implementation functions used to determine overlap. */
217 bool generic_overlap ();
218 bool strcat_overlap ();
219 bool strcpy_overlap ();
220
221 bool no_overlap ()
222 {
223 return false;
224 }
225
226 offset_int overlap_size (const offset_int [2], const offset_int[2],
227 offset_int [2]);
228
229 private:
230 /* Temporaries used to compute the final result. */
231 offset_int dstoff[2];
232 offset_int srcoff[2];
233 offset_int dstsiz[2];
234 offset_int srcsiz[2];
235
236 /* Pointer to a member function to call to determine overlap. */
237 bool (builtin_access::*detect_overlap) ();
238};
239
240/* Initialize a memory reference representation from a pointer EXPR and
241 a size SIZE in bytes. If SIZE is NULL_TREE then the size is assumed
242 to be unknown. STMT is the statement in which expr appears in. */
243
244builtin_memref::builtin_memref (pointer_query &ptrqry, gimple *stmt, tree expr,
 245 tree size)
 246: ptr (expr),
 247 ref (),
 248 base (),
 249 basesize (-1),
 250 refsize (-1),
 251 refoff (HOST_WIDE_INT_MIN(long) (1UL << (64 - 1))),
 252 offrange (),
 253 sizrange (),
 254 maxobjsize (tree_to_shwi (max_object_size ())),
 255 strbounded_p (),
 256 stmt (stmt),
 257 m_ptr_qry (ptrqry)
 258{
 259 /* Unfortunately, wide_int default ctor is a no-op so array members
 260 of the type must be set individually. */
 261 offrange[0] = offrange[1] = 0;
 262 sizrange[0] = sizrange[1] = 0;
 263
 264 if (!expr)
 265 return;
 266
 267 /* Find the BASE object or pointer referenced by EXPR and set
 268 the offset range OFFRANGE in the process. */
 269 set_base_and_offset (expr);
 270
 271 if (size)
 272 {
 273 tree range[2];
 274 /* Determine the size range, allowing for the result to be [0, 0]
 275 for SIZE in the anti-range ~[0, N] where N >= PTRDIFF_MAX. */
 276 get_size_range (m_ptr_qry.rvals, size, stmt, range, SR_ALLOW_ZERO);
 277 sizrange[0] = wi::to_offset (range[0]);
 278 sizrange[1] = wi::to_offset (range[1]);
 279 /* get_size_range returns SIZE_MAX for the maximum size.
 280 Constrain it to the real maximum of PTRDIFF_MAX. */
 281 if (sizrange[0] <= maxobjsize && sizrange[1] > maxobjsize)
 282 sizrange[1] = maxobjsize;
 283 }
 284 else
 285 sizrange[1] = maxobjsize;
 286
 /* The remaining adjustments apply only to references to declared
    objects whose type (and hence extent) may be known.  */
 287 if (!DECL_P (base)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (base)->base.code))] == tcc_declaration)
)
 288 return;
 289
 290 /* If the offset could be in the range of the referenced object
 291 constrain its bounds so neither exceeds those of the object. */
 292 if (offrange[0] < 0 && offrange[1] > 0)
 293 offrange[0] = 0;
 294
 295 offset_int maxoff = maxobjsize;
 296 tree basetype = TREE_TYPE (base)((contains_struct_check ((base), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 296, __FUNCTION__))->typed.type)
;
 297 if (TREE_CODE (basetype)((enum tree_code) (basetype)->base.code) == ARRAY_TYPE)
 298 {
 299 if (ref && array_ref_flexible_size_p (ref))
 300 ; /* Use the maximum possible offset for an array that might
 301 have flexible size. */
 302 else if (tree basesize = TYPE_SIZE_UNIT (basetype)((tree_class_check ((basetype), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 302, __FUNCTION__))->type_common.size_unit)
)
 303 if (TREE_CODE (basesize)((enum tree_code) (basesize)->base.code) == INTEGER_CST)
 304 /* Size could be non-constant for a variable-length type such
 305 as a struct with a VLA member (a GCC extension). */
 306 maxoff = wi::to_offset (basesize);
 307 }
 308
 /* Clamp the upper bound of a non-negative offset range to the size
    of the object when known, or to the maximum object size.  */
 309 if (offrange[0] >= 0)
 310 {
 311 if (offrange[1] < 0)
 312 offrange[1] = offrange[0] <= maxoff ? maxoff : maxobjsize;
 313 else if (offrange[0] <= maxoff && offrange[1] > maxoff)
 314 offrange[1] = maxoff;
 315 }
 316}
317
318/* Based on the initial length of the destination STARTLEN, returns
319 the offset of the first write access from the beginning of
320 the destination. Nonzero only for strcat-type of calls. */
321
322offset_int builtin_access::write_off (tree startlen) const
 323{
 /* Only strcat-family calls write past the initial string; all other
    built-ins (or an unknown/non-constant STARTLEN) write at offset 0.  */
 324 if (detect_overlap != &builtin_access::strcat_overlap
 325 || !startlen || TREE_CODE (startlen)((enum tree_code) (startlen)->base.code) != INTEGER_CST)
 326 return 0;
 327
 328 return wi::to_offset (startlen);
 329}
330
331/* Ctor helper to set or extend OFFRANGE based on the OFFSET argument.
332 Pointer offsets are represented as unsigned sizetype but must be
333 treated as signed. */
334
335void
336builtin_memref::extend_offset_range (tree offset)
337{
338 if (TREE_CODE (offset)((enum tree_code) (offset)->base.code) == INTEGER_CST)
339 {
340 offset_int off = int_cst_value (offset);
341 if (off != 0)
342 {
343 offrange[0] += off;
344 offrange[1] += off;
345 }
346 return;
347 }
348
349 if (TREE_CODE (offset)((enum tree_code) (offset)->base.code) == SSA_NAME)
350 {
351 /* A pointer offset is represented as sizetype but treated
352 as signed. */
353 wide_int min, max;
354 value_range_kind rng = VR_VARYING;
355 value_range vr;
356 if (m_ptr_qry.rvals->range_of_expr (vr, offset, stmt))
357 {
358 rng = vr.kind ();
359 if (!vr.undefined_p ())
360 {
361 min = wi::to_wide (vr.min ());
362 max = wi::to_wide (vr.max ());
363 }
364 }
365
366 if (rng == VR_ANTI_RANGE && wi::lts_p (max, min))
367 {
368 /* Convert an anti-range whose upper bound is less than
369 its lower bound to a signed range. */
370 offrange[0] += offset_int::from (max + 1, SIGNED);
371 offrange[1] += offset_int::from (min - 1, SIGNED);
372 return;
373 }
374
375 if (rng == VR_RANGE
376 && (DECL_P (base)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (base)->base.code))] == tcc_declaration)
|| wi::lts_p (min, max)))
377 {
378 /* Preserve the bounds of the range for an offset into
379 a known object (it may be adjusted later relative to
380 a constant offset from its beginning). Otherwise use
381 the bounds only when they are ascending when treated
382 as signed. */
383 offrange[0] += offset_int::from (min, SIGNED);
384 offrange[1] += offset_int::from (max, SIGNED);
385 return;
386 }
387
388 /* Handle an anti-range the same as no range at all. */
389 gimple *stmt = SSA_NAME_DEF_STMT (offset)(tree_check ((offset), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 389, __FUNCTION__, (SSA_NAME)))->ssa_name.def_stmt
;
390 tree type;
391 if (is_gimple_assign (stmt)
392 && (type = TREE_TYPE (gimple_assign_rhs1 (stmt))((contains_struct_check ((gimple_assign_rhs1 (stmt)), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 392, __FUNCTION__))->typed.type)
)
393 && INTEGRAL_TYPE_P (type)(((enum tree_code) (type)->base.code) == ENUMERAL_TYPE || (
(enum tree_code) (type)->base.code) == BOOLEAN_TYPE || ((enum
tree_code) (type)->base.code) == INTEGER_TYPE)
)
394 {
395 tree_code code = gimple_assign_rhs_code (stmt);
396 if (code == NOP_EXPR)
397 {
398 /* Use the bounds of the type of the NOP_EXPR operand
399 even if it's signed. The result doesn't trigger
400 warnings but makes their output more readable. */
401 offrange[0] += wi::to_offset (TYPE_MIN_VALUE (type)((tree_check5 ((type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 401, __FUNCTION__, (INTEGER_TYPE), (ENUMERAL_TYPE), (BOOLEAN_TYPE
), (REAL_TYPE), (FIXED_POINT_TYPE)))->type_non_common.minval
)
);
402 offrange[1] += wi::to_offset (TYPE_MAX_VALUE (type)((tree_check5 ((type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 402, __FUNCTION__, (INTEGER_TYPE), (ENUMERAL_TYPE), (BOOLEAN_TYPE
), (REAL_TYPE), (FIXED_POINT_TYPE)))->type_non_common.maxval
)
);
403 return;
404 }
405 }
406 }
407
408 const offset_int maxoff = tree_to_shwi (max_object_size ()) >> 1;
409 const offset_int minoff = -maxoff - 1;
410
411 offrange[0] += minoff;
412 offrange[1] += maxoff;
413}
414
415/* Determines the base object or pointer of the reference EXPR
416 and the offset range from the beginning of the base. */
417
418void
 419builtin_memref::set_base_and_offset (tree expr)
 420{
 421 tree offset = NULL_TREE(tree) __null;
 422
 /* Peel conversions, ADDR_EXPRs and POINTER_PLUS_EXPRs off of an SSA
    pointer, accumulating any variable offset in OFFSET.  */
 423 if (TREE_CODE (expr)((enum tree_code) (expr)->base.code) == SSA_NAME)
 424 {
 425 /* Try to tease the offset out of the pointer. */
 426 gimple *stmt = SSA_NAME_DEF_STMT (expr)(tree_check ((expr), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 426, __FUNCTION__, (SSA_NAME)))->ssa_name.def_stmt
;
 427 if (!base
 428 && gimple_assign_single_p (stmt)
 429 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
 430 expr = gimple_assign_rhs1 (stmt);
 431 else if (is_gimple_assign (stmt))
 432 {
 433 tree_code code = gimple_assign_rhs_code (stmt);
 434 if (CONVERT_EXPR_CODE_P (code)((code) == NOP_EXPR || (code) == CONVERT_EXPR))
 435 {
 436 tree rhs = gimple_assign_rhs1 (stmt);
 437 if (POINTER_TYPE_P (TREE_TYPE (rhs))(((enum tree_code) (((contains_struct_check ((rhs), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 437, __FUNCTION__))->typed.type))->base.code) == POINTER_TYPE
 || ((enum tree_code) (((contains_struct_check ((rhs), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 437, __FUNCTION__))->typed.type))->base.code) == REFERENCE_TYPE
)
)
 438 expr = gimple_assign_rhs1 (stmt);
 439 else
 440 {
 441 base = expr;
 442 return;
 443 }
 444 }
 445 else if (code == POINTER_PLUS_EXPR)
 446 {
 447 expr = gimple_assign_rhs1 (stmt);
 448 offset = gimple_assign_rhs2 (stmt);
 449 }
 450 else
 451 {
 452 base = expr;
 453 return;
 454 }
 455 }
 456 else
 457 {
 458 /* FIXME: Handle PHI nodes in case like:
 459 _12 = &MEM[(void *)&a + 2B] + _10;
 460
 461 <bb> [local count: 1073741824]:
 462 # prephitmp_13 = PHI <_12, &MEM[(void *)&a + 2B]>
 463 memcpy (prephitmp_13, p_7(D), 6); */
 464 base = expr;
 465 return;
 466 }
 467 }
 468
 469 if (TREE_CODE (expr)((enum tree_code) (expr)->base.code) == ADDR_EXPR)
 470 expr = TREE_OPERAND (expr, 0)(*((const_cast<tree*> (tree_operand_check ((expr), (0),
 "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 470, __FUNCTION__)))))
;
 471
 472 /* Stash the reference for offset validation. */
 473 ref = expr;
 474
 475 poly_int64 bitsize, bitpos;
 476 tree var_off;
 477 machine_mode mode;
 478 int sign, reverse, vol;
 479
 480 /* Determine the base object or pointer of the reference and
 481 the constant bit offset from the beginning of the base.
 482 If the offset has a non-constant component, it will be in
 483 VAR_OFF. MODE, SIGN, REVERSE, and VOL are write only and
 484 unused here. */
 485 base = get_inner_reference (expr, &bitsize, &bitpos, &var_off,
 486 &mode, &sign, &reverse, &vol);
 487
 488 /* get_inner_reference is not expected to return null. */
 489 gcc_assert (base != NULL)((void)(!(base != __null) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 489, __FUNCTION__), 0 : 0))
;
 490
 491 if (offset)
 492 extend_offset_range (offset);
 493
 494 poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT(8));
 495
 496 /* Convert the poly_int64 offset to offset_int. The offset
 497 should be constant but be prepared for it not to be just in
 498 case. */
 499 offset_int cstoff;
 500 if (bytepos.is_constant (&cstoff))
 501 {
 502 offrange[0] += cstoff;
 503 offrange[1] += cstoff;
 504
 505 /* Besides the reference saved above, also stash the offset
 506 for validation. */
 507 if (TREE_CODE (expr)((enum tree_code) (expr)->base.code) == COMPONENT_REF)
 508 refoff = cstoff;
 509 }
 510 else
 511 offrange[1] += maxobjsize;
 512
 513 if (var_off)
 514 {
 515 if (TREE_CODE (var_off)((enum tree_code) (var_off)->base.code) == INTEGER_CST)
 516 {
 517 cstoff = wi::to_offset (var_off);
 518 offrange[0] += cstoff;
 519 offrange[1] += cstoff;
 520 }
 521 else
 522 offrange[1] += maxobjsize;
 523 }
 524
 525 if (TREE_CODE (base)((enum tree_code) (base)->base.code) == MEM_REF)
 526 {
 527 tree memrefoff = fold_convert (ptrdiff_type_node, TREE_OPERAND (base, 1))fold_convert_loc (((location_t) 0), global_trees[TI_PTRDIFF_TYPE
], (*((const_cast<tree*> (tree_operand_check ((base), (
1), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 527, __FUNCTION__))))))
;
 528 extend_offset_range (memrefoff);
 529
 530 if (refoff != HOST_WIDE_INT_MIN(long) (1UL << (64 - 1))
 531 && TREE_CODE (expr)((enum tree_code) (expr)->base.code) == COMPONENT_REF)
 532 {
 533 /* Bump up the offset of the referenced subobject to reflect
 534 the offset to the enclosing object. For example, so that
 535 in
 536 struct S { char a, b[3]; } s[2];
 537 strcpy (s[1].b, "1234");
 538 REFOFF is set to s[1].b - (char*)s. */
 539 offset_int off = tree_to_shwi (memrefoff);
 540 refoff += off;
 541
 542 if (!integer_zerop (memrefoff)
 543 && !COMPLETE_TYPE_P (TREE_TYPE (expr))(((tree_class_check ((((contains_struct_check ((expr), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 543, __FUNCTION__))->typed.type)), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 543, __FUNCTION__))->type_common.size) != (tree) __null)
 544 && multiple_of_p (sizetypesizetype_tab[(int) stk_sizetype], memrefoff,
 545 TYPE_SIZE_UNIT (TREE_TYPE (base))((tree_class_check ((((contains_struct_check ((base), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 545, __FUNCTION__))->typed.type)), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 545, __FUNCTION__))->type_common.size_unit)
, true))
 546 /* A non-zero offset into an array of struct with flexible array
 547 members implies that the array is empty because there is no
 548 way to initialize such a member when it belongs to an array.
 549 This must be some sort of a bug. */
 550 refsize = 0;
 551 }
 552
 553 base = TREE_OPERAND (base, 0)(*((const_cast<tree*> (tree_operand_check ((base), (0),
 "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 553, __FUNCTION__)))))
;
 554 }
 555
 556 if (TREE_CODE (ref)((enum tree_code) (ref)->base.code) == COMPONENT_REF)
 557 if (tree size = component_ref_size (ref))
 558 if (TREE_CODE (size)((enum tree_code) (size)->base.code) == INTEGER_CST)
 559 refsize = wi::to_offset (size);
 560
 /* If the base is itself an SSA pointer (e.g. after stripping
    a MEM_REF), recurse to peel further offsets off of it.  */
 561 if (TREE_CODE (base)((enum tree_code) (base)->base.code) == SSA_NAME)
 562 set_base_and_offset (base);
 563}
564
565/* Return error_mark_node if the signed offset exceeds the bounds
566 of the address space (PTRDIFF_MAX). Otherwise, return either BASE
567 or REF when the offset exceeds the bounds of the BASE or REF object,
568 and set OOBOFF to the past-the-end offset formed by the reference,
 569 including its size. OOBOFF is initially set to the range of offsets,
570 and OOBOFF[2] to the offset of the first write access (nonzero for
571 the strcat family). When STRICT is nonzero use REF size, when
572 available, otherwise use BASE size. When STRICT is greater than 1,
573 use the size of the last array member as the bound, otherwise treat
574 such a member as a flexible array member. Return NULL when the offset
575 is in bounds. */
576
577tree
 578builtin_memref::offset_out_of_bounds (int strict, offset_int ooboff[3]) const
 579{
 580 if (!ptr)
 581 return NULL_TREE(tree) __null;
 582
 583 /* The offset of the first write access or zero. */
 584 offset_int wroff = ooboff[2];
 585
 586 /* A temporary, possibly adjusted, copy of the offset range. */
 587 offset_int offrng[2] = { ooboff[0], ooboff[1] };
 588
 589 if (DECL_P (base)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (base)->base.code))] == tcc_declaration)
 && TREE_CODE (TREE_TYPE (base))((enum tree_code) (((contains_struct_check ((base), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 589, __FUNCTION__))->typed.type))->base.code)
 == ARRAY_TYPE)
 590 {
 591 /* Check for offset in an anti-range with a negative lower bound.
 592 For such a range, consider only the non-negative subrange. */
 593 if (offrng[1] < offrng[0] && offrng[1] < 0)
 594 offrng[1] = maxobjsize;
 595 }
 596
 597 /* Conservative offset of the last byte of the referenced object. */
 598 offset_int endoff;
 599
 600 /* The bounds need not be ordered. Set HIB to use as the index
 601 of the larger of the bounds and LOB as the opposite. */
 602 bool hib = wi::les_p (offrng[0], offrng[1]);
 603 bool lob = !hib;
 604
 605 /* Set to the size remaining in the object after subtracting
 606 REFOFF. It may become negative as a result of negative indices
 607 into the enclosing object, such as in:
 608 extern struct S { char a[4], b[3], c[1]; } *p;
 609 strcpy (p[-3].b, "123"); */
 610 offset_int size = basesize;
 611 tree obj = base;
 612
 613 const bool decl_p = DECL_P (obj)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (obj)->base.code))] == tcc_declaration)
;
 614
 615 if (basesize < 0)
 616 {
 617 endoff = offrng[lob] + (sizrange[0] - wroff);
 618
 619 /* For a reference through a pointer to an object of unknown size
 620 all initial offsets are considered valid, positive as well as
 621 negative, since the pointer itself can point past the beginning
 622 of the object. However, the sum of the lower bound of the offset
 623 and that of the size must be less than or equal than PTRDIFF_MAX. */
 624 if (endoff > maxobjsize)
 625 return error_mark_nodeglobal_trees[TI_ERROR_MARK];
 626
 627 /* When the referenced subobject is known, the end offset must be
 628 within its bounds. Otherwise there is nothing to do. */
 629 if (strict
 630 && !decl_p
 631 && ref
 632 && refsize >= 0
 633 && TREE_CODE (ref)((enum tree_code) (ref)->base.code) == COMPONENT_REF)
 634 {
 635 /* If REFOFF is negative, SIZE will become negative here. */
 636 size = refoff + refsize;
 637 obj = ref;
 638 }
 639 else
 640 return NULL_TREE(tree) __null;
 641 }
 642
 643 /* A reference to an object of known size must be within the bounds
 644 of either the base object or the subobject (see above for when
 645 a subobject can be used). */
 646 if ((decl_p && offrng[hib] < 0) || offrng[lob] > size)
 647 return obj;
 648
 649 /* The extent of the reference must also be within the bounds of
 650 the base object (if known) or the subobject or the maximum object
 651 size otherwise. */
 652 endoff = offrng[lob] + sizrange[0];
 653 if (endoff > maxobjsize)
 654 return error_mark_nodeglobal_trees[TI_ERROR_MARK];
 655
 656 if (strict
 657 && decl_p
 658 && ref
 659 && refsize >= 0
 660 && TREE_CODE (ref)((enum tree_code) (ref)->base.code) == COMPONENT_REF)
 661 {
 662 /* If the reference is to a member subobject of a declared object,
 663 the offset must be within the bounds of the subobject. */
 664 size = refoff + refsize;
 665 obj = ref;
 666 }
 667
 /* In bounds: report no problem.  */
 668 if (endoff <= size)
 669 return NULL_TREE(tree) __null;
 670
 671 /* Set the out-of-bounds offset range to be one greater than
 672 that delimited by the reference including its size. */
 673 ooboff[lob] = size;
 674
 675 if (endoff > ooboff[lob])
 676 ooboff[hib] = endoff - 1;
 677 else
 678 ooboff[hib] = offrng[lob] + sizrange[1];
 679
 /* Out of bounds: return the object (base or subobject) whose bounds
    the access exceeds, with OOBOFF updated for the caller's message.  */
 680 return obj;
 681}
682
683/* Create an association between the memory references DST and SRC
 684 for access by a call EXPR to a memory or string built-in function. */
685
686builtin_access::builtin_access (pointer_query &ptrqry, gimple *call,
687 builtin_memref &dst,
688 builtin_memref &src)
689: dstref (&dst), srcref (&src), sizrange (), ovloff (), ovlsiz (),
690 dstoff (), srcoff (), dstsiz (), srcsiz ()
691{
692 dstoff[0] = dst.offrange[0];
693 dstoff[1] = dst.offrange[1];
694
695 /* Zero out since the offset_int ctors invoked above are no-op. */
696 srcoff[0] = srcoff[1] = 0;
697 dstsiz[0] = dstsiz[1] = 0;
698 srcsiz[0] = srcsiz[1] = 0;
699
700 /* Object Size Type to use to determine the size of the destination
701 and source objects. Overridden below for raw memory functions. */
702 int ostype = 1;
703
704 /* True when the size of one reference depends on the offset of
705 itself or the other. */
706 bool depends_p = true;
707
708 /* True when the size of the destination reference DSTREF has been
709 determined from SRCREF and so needs to be adjusted by the latter's
710 offset. Only meaningful for bounded string functions like strncpy. */
711 bool dstadjust_p = false;
712
713 /* The size argument number (depends on the built-in). */
714 unsigned sizeargno = 2;
715
716 tree func = gimple_call_fndecl (call);
717 switch (DECL_FUNCTION_CODE (func))
718 {
719 case BUILT_IN_MEMCPY:
720 case BUILT_IN_MEMCPY_CHK:
721 case BUILT_IN_MEMPCPY:
722 case BUILT_IN_MEMPCPY_CHK:
723 ostype = 0;
724 depends_p = false;
725 detect_overlap = &builtin_access::generic_overlap;
726 break;
727
728 case BUILT_IN_MEMMOVE:
729 case BUILT_IN_MEMMOVE_CHK:
730 /* For memmove there is never any overlap to check for. */
731 ostype = 0;
732 depends_p = false;
733 detect_overlap = &builtin_access::no_overlap;
734 break;
735
736 case BUILT_IN_MEMSET:
737 case BUILT_IN_MEMSET_CHK:
738 /* For memset there is never any overlap to check for. */
739 ostype = 0;
740 depends_p = false;
741 detect_overlap = &builtin_access::no_overlap;
742 break;
743
744 case BUILT_IN_STPNCPY:
745 case BUILT_IN_STPNCPY_CHK:
746 case BUILT_IN_STRNCPY:
747 case BUILT_IN_STRNCPY_CHK:
748 dstref->strbounded_p = true;
749 detect_overlap = &builtin_access::strcpy_overlap;
750 break;
751
752 case BUILT_IN_STPCPY:
753 case BUILT_IN_STPCPY_CHK:
754 case BUILT_IN_STRCPY:
755 case BUILT_IN_STRCPY_CHK:
756 detect_overlap = &builtin_access::strcpy_overlap;
757 break;
758
759 case BUILT_IN_STRCAT:
760 case BUILT_IN_STRCAT_CHK:
761 detect_overlap = &builtin_access::strcat_overlap;
762 break;
763
764 case BUILT_IN_STRNCAT:
765 case BUILT_IN_STRNCAT_CHK:
766 dstref->strbounded_p = true;
767 srcref->strbounded_p = true;
768 detect_overlap = &builtin_access::strcat_overlap;
769 break;
770
771 default:
772 /* Handle other string functions here whose access may need
773 to be validated for in-bounds offsets and non-overlapping
774 copies. */
775 return;
776 }
777
778 /* Try to determine the size of the base object. compute_objsize
779 expects a pointer so create one if BASE is a non-pointer object. */
780 if (dst.basesize < 0)
781 {
782 access_ref aref;
783 if (ptrqry.get_ref (dst.base, call, &aref, ostype) && aref.base0)
784 dst.basesize = aref.sizrng[1];
785 else
786 dst.basesize = HOST_WIDE_INT_MIN(long) (1UL << (64 - 1));
787 }
788
789 if (src.base && src.basesize < 0)
790 {
791 access_ref aref;
792 if (ptrqry.get_ref (src.base, call, &aref, ostype) && aref.base0)
793 src.basesize = aref.sizrng[1];
794 else
795 src.basesize = HOST_WIDE_INT_MIN(long) (1UL << (64 - 1));
796 }
797
798 const offset_int maxobjsize = dst.maxobjsize;
799
800 /* Make adjustments for references to the same object by string
801 built-in functions to reflect the constraints imposed by
802 the function. */
803
804 /* For bounded string functions determine the range of the bound
805 on the access. For others, the range stays unbounded. */
806 offset_int bounds[2] = { maxobjsize, maxobjsize };
807 if (dstref->strbounded_p)
808 {
809 unsigned nargs = gimple_call_num_args (call);
810 if (nargs <= sizeargno)
811 return;
812
813 tree size = gimple_call_arg (call, sizeargno);
814 tree range[2];
815 if (get_size_range (ptrqry.rvals, size, call, range, true))
816 {
817 bounds[0] = wi::to_offset (range[0]);
818 bounds[1] = wi::to_offset (range[1]);
819 }
820
821 /* If both references' size ranges are indeterminate use the last
822 (size) argument from the function call as a substitute. This
823 may only be necessary for strncpy (but not for memcpy where
824 the size range would have been already determined this way). */
825 if (dstref->sizrange[0] == 0 && dstref->sizrange[1] == maxobjsize
826 && srcref->sizrange[0] == 0 && srcref->sizrange[1] == maxobjsize)
827 {
828 dstref->sizrange[0] = bounds[0];
829 dstref->sizrange[1] = bounds[1];
830 }
831 }
832
833 bool dstsize_set = false;
834 /* The size range of one reference involving the same base object
835 can be determined from the size range of the other reference.
836 This makes it possible to compute accurate offsets for warnings
837 involving functions like strcpy where the length of just one of
838 the two arguments is known (determined by tree-ssa-strlen). */
839 if (dstref->sizrange[0] == 0 && dstref->sizrange[1] == maxobjsize)
840 {
841 /* When the destination size is unknown set it to the size of
842 the source. */
843 dstref->sizrange[0] = srcref->sizrange[0];
844 dstref->sizrange[1] = srcref->sizrange[1];
845 dstsize_set = true;
846 }
847 else if (srcref->sizrange[0] == 0 && srcref->sizrange[1] == maxobjsize)
848 {
849 /* When the size of the source access is unknown set it to the size
850 of the destination first and adjust it later if necessary. */
851 srcref->sizrange[0] = dstref->sizrange[0];
852 srcref->sizrange[1] = dstref->sizrange[1];
853
854 if (depends_p)
855 {
856 if (dstref->strbounded_p)
857 {
858 /* Read access by strncpy is constrained by the third
859 argument but except for a zero bound is at least one. */
860 srcref->sizrange[0] = bounds[1] > 0 ? 1 : 0;
861 offset_int bound = wi::umin (srcref->basesize, bounds[1]);
862 if (bound < srcref->sizrange[1])
863 srcref->sizrange[1] = bound;
864 }
865 /* For string functions, adjust the size range of the source
866 reference by the inverse boundaries of the offset (because
867 the higher the offset into the string the shorter its
868 length). */
869 if (srcref->offrange[1] >= 0
870 && srcref->offrange[1] < srcref->sizrange[0])
871 srcref->sizrange[0] -= srcref->offrange[1];
872 else
873 srcref->sizrange[0] = 1;
874
875 if (srcref->offrange[0] > 0)
876 {
877 if (srcref->offrange[0] < srcref->sizrange[1])
878 srcref->sizrange[1] -= srcref->offrange[0];
879 else
880 srcref->sizrange[1] = 0;
881 }
882
883 dstadjust_p = true;
884 }
885 }
886
887 if (detect_overlap == &builtin_access::generic_overlap)
888 {
889 if (dstref->strbounded_p)
890 {
891 dstref->sizrange[0] = bounds[0];
892 dstref->sizrange[1] = bounds[1];
893
894 if (dstref->sizrange[0] < srcref->sizrange[0])
895 srcref->sizrange[0] = dstref->sizrange[0];
896
897 if (dstref->sizrange[1] < srcref->sizrange[1])
898 srcref->sizrange[1] = dstref->sizrange[1];
899 }
900 }
901 else if (detect_overlap == &builtin_access::strcpy_overlap)
902 {
903 if (!dstref->strbounded_p)
904 {
905 /* For strcpy, adjust the destination size range to match that
906 of the source computed above. */
907 if (depends_p && dstadjust_p)
908 {
909 dstref->sizrange[0] = srcref->sizrange[0];
910 dstref->sizrange[1] = srcref->sizrange[1];
911 }
912 }
913 }
914 else if (!dstsize_set && detect_overlap == &builtin_access::strcat_overlap)
915 {
916 dstref->sizrange[0] += srcref->sizrange[0] - 1;
917 dstref->sizrange[1] += srcref->sizrange[1] - 1;
918 }
919
920 if (dstref->strbounded_p)
921 {
922 /* For strncpy, adjust the destination size range to match that
923 of the source computed above. */
924 dstref->sizrange[0] = bounds[0];
925 dstref->sizrange[1] = bounds[1];
926
927 if (bounds[0] < srcref->sizrange[0])
928 srcref->sizrange[0] = bounds[0];
929
930 if (bounds[1] < srcref->sizrange[1])
931 srcref->sizrange[1] = bounds[1];
932 }
933}
934
935offset_int
936builtin_access::overlap_size (const offset_int a[2], const offset_int b[2],
937 offset_int *off)
938{
939 const offset_int *p = a;
940 const offset_int *q = b;
941
942 /* Point P at the bigger of the two ranges and Q at the smaller. */
943 if (wi::lts_p (a[1] - a[0], b[1] - b[0]))
36
Assuming the condition is false
37
Taking false branch
56
Assuming the condition is false
57
Taking false branch
944 {
945 p = b;
946 q = a;
947 }
948
949 if (p[0] < q[0])
38
Assuming the condition is false
39
Taking false branch
58
Assuming the condition is false
59
Taking false branch
950 {
951 if (p[1] < q[0])
952 return 0;
953
954 *off = q[0];
955 return wi::smin (p[1], q[1]) - q[0];
956 }
957
958 if (q[1] < p[0])
40
Assuming the condition is true
41
Taking true branch
60
Assuming the condition is true
61
Taking true branch
959 return 0;
42
Returning without writing to 'off->len'
62
Returning without writing to 'off->len'
960
961 off[0] = p[0];
962 return q[1] - p[0];
963}
964
965/* Return true if the bounded memory (memcpy and similar) or string function
966 access (strncpy and similar) ACS overlaps. */
967
968bool
969builtin_access::generic_overlap ()
970{
971 builtin_access &acs = *this;
972 const builtin_memref *dstref = acs.dstref;
973 const builtin_memref *srcref = acs.srcref;
974
975 gcc_assert (dstref->base == srcref->base)((void)(!(dstref->base == srcref->base) ? fancy_abort (
"/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 975, __FUNCTION__), 0 : 0))
;
2
Assuming 'dstref->base' is equal to 'srcref->base'
3
'?' condition is false
976
977 const offset_int maxobjsize = acs.dstref->maxobjsize;
978
979 offset_int maxsize = dstref->basesize < 0 ? maxobjsize : dstref->basesize;
4
'?' condition is false
980
981 /* Adjust the larger bounds of the offsets (which may be the first
982 element if the lower bound is larger than the upper bound) to
983 make them valid for the smallest access (if possible) but no smaller
984 than the smaller bounds. */
985 gcc_assert (wi::les_p (acs.dstoff[0], acs.dstoff[1]))((void)(!(wi::les_p (acs.dstoff[0], acs.dstoff[1])) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 985, __FUNCTION__), 0 : 0))
;
5
'?' condition is false
986
987 if (maxsize < acs.dstoff[1] + acs.dstsiz[0])
6
Assuming the condition is false
7
Taking false branch
988 acs.dstoff[1] = maxsize - acs.dstsiz[0];
989 if (acs.dstoff[1] < acs.dstoff[0])
8
Assuming the condition is false
9
Taking false branch
990 acs.dstoff[1] = acs.dstoff[0];
991
992 gcc_assert (wi::les_p (acs.srcoff[0], acs.srcoff[1]))((void)(!(wi::les_p (acs.srcoff[0], acs.srcoff[1])) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 992, __FUNCTION__), 0 : 0))
;
10
'?' condition is false
993
994 if (maxsize < acs.srcoff[1] + acs.srcsiz[0])
11
Assuming the condition is false
12
Taking false branch
995 acs.srcoff[1] = maxsize - acs.srcsiz[0];
996 if (acs.srcoff[1] < acs.srcoff[0])
13
Assuming the condition is false
14
Taking false branch
997 acs.srcoff[1] = acs.srcoff[0];
998
999 /* Determine the minimum and maximum space for the access given
1000 the offsets. */
1001 offset_int space[2];
1002 space[0] = wi::abs (acs.dstoff[0] - acs.srcoff[0]);
1003 space[1] = space[0];
1004
1005 offset_int d = wi::abs (acs.dstoff[0] - acs.srcoff[1]);
1006 if (acs.srcsiz[0] > 0)
15
Assuming the condition is false
16
Taking false branch
1007 {
1008 if (d < space[0])
1009 space[0] = d;
1010
1011 if (space[1] < d)
1012 space[1] = d;
1013 }
1014 else
1015 space[1] = acs.dstsiz[1];
1016
1017 d = wi::abs (acs.dstoff[1] - acs.srcoff[0]);
1018 if (d < space[0])
17
Assuming the condition is false
18
Taking false branch
1019 space[0] = d;
1020
1021 if (space[1] < d)
19
Assuming the condition is false
20
Taking false branch
1022 space[1] = d;
1023
1024 /* Treat raw memory functions both of whose references are bounded
1025 as special and permit uncertain overlaps to go undetected. For
1026 all kinds of constant offset and constant size accesses, if
1027 overlap isn't certain it is not possible. */
1028 bool overlap_possible = space[0] < acs.dstsiz[1];
1029 if (!overlap_possible)
21
Assuming 'overlap_possible' is true
22
Taking false branch
1030 return false;
1031
1032 bool overlap_certain = space[1] < acs.dstsiz[0];
1033
1034 /* True when the size of one reference depends on the offset of
1035 the other. */
1036 bool depends_p = detect_overlap != &builtin_access::generic_overlap;
1037
1038 if (!overlap_certain)
23
Assuming 'overlap_certain' is true
1039 {
1040 if (!dstref->strbounded_p && !depends_p)
1041 /* Memcpy only considers certain overlap. */
1042 return false;
1043
1044 /* There's no way to distinguish an access to the same member
1045 of a structure from one to two distinct members of the same
1046 structure. Give up to avoid excessive false positives. */
1047 tree basetype = TREE_TYPE (dstref->base)((contains_struct_check ((dstref->base), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1047, __FUNCTION__))->typed.type)
;
1048
1049 if (POINTER_TYPE_P (basetype)(((enum tree_code) (basetype)->base.code) == POINTER_TYPE ||
((enum tree_code) (basetype)->base.code) == REFERENCE_TYPE
)
)
1050 basetype = TREE_TYPE (basetype)((contains_struct_check ((basetype), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1050, __FUNCTION__))->typed.type)
;
1051 else
1052 while (TREE_CODE (basetype)((enum tree_code) (basetype)->base.code) == ARRAY_TYPE)
1053 basetype = TREE_TYPE (basetype)((contains_struct_check ((basetype), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1053, __FUNCTION__))->typed.type)
;
1054
1055 if (RECORD_OR_UNION_TYPE_P (basetype)(((enum tree_code) (basetype)->base.code) == RECORD_TYPE ||
((enum tree_code) (basetype)->base.code) == UNION_TYPE ||
((enum tree_code) (basetype)->base.code) == QUAL_UNION_TYPE
)
)
1056 return false;
1057 }
1058
1059 /* True for stpcpy and strcpy. */
1060 bool stxcpy_p = (!dstref->strbounded_p
24
Assuming field 'strbounded_p' is false
1061 && detect_overlap == &builtin_access::strcpy_overlap);
1062
1063 if (dstref->refoff >= 0
25
Taking false branch
1064 && srcref->refoff >= 0
1065 && dstref->refoff != srcref->refoff
1066 && (stxcpy_p || dstref->strbounded_p || srcref->strbounded_p))
1067 return false;
1068
1069 offset_int siz[2] = { maxobjsize + 1, 0 };
1070
1071 ovloff[0] = HOST_WIDE_INT_MAX(~((long) (1UL << (64 - 1))));
1072 ovloff[1] = HOST_WIDE_INT_MIN(long) (1UL << (64 - 1));
1073
1074 if (stxcpy_p)
26
Assuming 'stxcpy_p' is true
27
Taking true branch
1075 {
1076 /* Iterate over the extreme locations (on the horizontal axis formed
1077 by their offsets) and sizes of two regions and find their smallest
1078 and largest overlap and the corresponding offsets. */
1079 for (unsigned i = 0; i != 2; ++i)
28
Loop condition is true. Entering loop body
48
Loop condition is true. Entering loop body
1080 {
1081 const offset_int a[2] = {
1082 acs.dstoff[i], acs.dstoff[i] + acs.dstsiz[!i]
1083 };
1084
1085 const offset_int b[2] = {
1086 acs.srcoff[i], acs.srcoff[i] + acs.srcsiz[!i]
1087 };
1088
1089 offset_int off;
29
Calling default constructor for 'generic_wide_int<fixed_wide_int_storage<128>>'
34
Returning from default constructor for 'generic_wide_int<fixed_wide_int_storage<128>>'
49
Calling default constructor for 'generic_wide_int<fixed_wide_int_storage<128>>'
54
Returning from default constructor for 'generic_wide_int<fixed_wide_int_storage<128>>'
1090 offset_int sz = overlap_size (a, b, &off);
35
Calling 'builtin_access::overlap_size'
43
Returning from 'builtin_access::overlap_size'
55
Calling 'builtin_access::overlap_size'
63
Returning from 'builtin_access::overlap_size'
1091 if (sz < siz[0])
44
Assuming the condition is false
45
Taking false branch
64
Assuming the condition is false
65
Taking false branch
1092 siz[0] = sz;
1093
1094 if (siz[1] <= sz)
46
Taking false branch
66
Taking true branch
1095 siz[1] = sz;
1096
1097 if (sz != 0)
47
Taking false branch
67
Taking true branch
1098 {
1099 if (wi::lts_p (off, ovloff[0]))
68
Calling 'lts_p<generic_wide_int<fixed_wide_int_storage<128>>, long>'
1100 ovloff[0] = off.to_shwi ();
1101 if (wi::lts_p (ovloff[1], off))
1102 ovloff[1] = off.to_shwi ();
1103 }
1104 }
1105 }
1106 else
1107 {
1108 /* Iterate over the extreme locations (on the horizontal axis
1109 formed by their offsets) and sizes of the two regions and
1110 find their smallest and largest overlap and the corresponding
1111 offsets. */
1112
1113 for (unsigned io = 0; io != 2; ++io)
1114 for (unsigned is = 0; is != 2; ++is)
1115 {
1116 const offset_int a[2] = {
1117 acs.dstoff[io], acs.dstoff[io] + acs.dstsiz[is]
1118 };
1119
1120 for (unsigned jo = 0; jo != 2; ++jo)
1121 for (unsigned js = 0; js != 2; ++js)
1122 {
1123 const offset_int b[2] = {
1124 acs.srcoff[jo], acs.srcoff[jo] + acs.srcsiz[js]
1125 };
1126
1127 offset_int off;
1128 offset_int sz = overlap_size (a, b, &off);
1129 if (sz < siz[0])
1130 siz[0] = sz;
1131
1132 if (siz[1] <= sz)
1133 siz[1] = sz;
1134
1135 if (sz != 0)
1136 {
1137 if (wi::lts_p (off, ovloff[0]))
1138 ovloff[0] = off.to_shwi ();
1139 if (wi::lts_p (ovloff[1], off))
1140 ovloff[1] = off.to_shwi ();
1141 }
1142 }
1143 }
1144 }
1145
1146 ovlsiz[0] = siz[0].to_shwi ();
1147 ovlsiz[1] = siz[1].to_shwi ();
1148
1149 /* Adjust the overlap offset range to reflect the overlap size range. */
1150 if (ovlsiz[0] == 0 && ovlsiz[1] > 1)
1151 ovloff[1] = ovloff[0] + ovlsiz[1] - 1;
1152
1153 return true;
1154}
1155
1156/* Return true if the strcat-like access overlaps. */
1157
1158bool
1159builtin_access::strcat_overlap ()
1160{
1161 builtin_access &acs = *this;
1162 const builtin_memref *dstref = acs.dstref;
1163 const builtin_memref *srcref = acs.srcref;
1164
1165 gcc_assert (dstref->base == srcref->base)((void)(!(dstref->base == srcref->base) ? fancy_abort (
"/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1165, __FUNCTION__), 0 : 0))
;
1166
1167 const offset_int maxobjsize = acs.dstref->maxobjsize;
1168
1169 gcc_assert (dstref->base && dstref->base == srcref->base)((void)(!(dstref->base && dstref->base == srcref
->base) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1169, __FUNCTION__), 0 : 0))
;
1170
1171 /* Adjust for strcat-like accesses. */
1172
1173 /* As a special case for strcat, set the DSTREF offsets to the length
1174 of the destination string since the function starts writing over
1175 its terminating nul, and set the destination size to 1 for the length
1176 of the nul. */
1177 acs.dstoff[0] += dstsiz[0] - srcref->sizrange[0];
1178 acs.dstoff[1] += dstsiz[1] - srcref->sizrange[1];
1179
1180 bool strfunc_unknown_args = acs.dstsiz[0] == 0 && acs.dstsiz[1] != 0;
1181
1182 /* The lower bound is zero when the size is unknown because then
1183 overlap is not certain. */
1184 acs.dstsiz[0] = strfunc_unknown_args ? 0 : 1;
1185 acs.dstsiz[1] = 1;
1186
1187 offset_int maxsize = dstref->basesize < 0 ? maxobjsize : dstref->basesize;
1188
1189 /* For references to the same base object, determine if there's a pair
1190 of valid offsets into the two references such that access between
1191 them doesn't overlap. Adjust both upper bounds to be valid for
1192 the smaller size (i.e., at most MAXSIZE - SIZE). */
1193
1194 if (maxsize < acs.dstoff[1] + acs.dstsiz[0])
1195 acs.dstoff[1] = maxsize - acs.dstsiz[0];
1196
1197 if (maxsize < acs.srcoff[1] + acs.srcsiz[0])
1198 acs.srcoff[1] = maxsize - acs.srcsiz[0];
1199
1200 /* Check to see if there's enough space for both accesses without
1201 overlap. Determine the optimistic (maximum) amount of available
1202 space. */
1203 offset_int space;
1204 if (acs.dstoff[0] <= acs.srcoff[0])
1205 {
1206 if (acs.dstoff[1] < acs.srcoff[1])
1207 space = acs.srcoff[1] + acs.srcsiz[0] - acs.dstoff[0];
1208 else
1209 space = acs.dstoff[1] + acs.dstsiz[0] - acs.srcoff[0];
1210 }
1211 else
1212 space = acs.dstoff[1] + acs.dstsiz[0] - acs.srcoff[0];
1213
1214 /* Overlap is certain if the distance between the farthest offsets
1215 of the opposite accesses is less than the sum of the lower bounds
1216 of the sizes of the two accesses. */
1217 bool overlap_certain = space < acs.dstsiz[0] + acs.srcsiz[0];
1218
1219 /* For a constant-offset, constant size access, consider the largest
1220 distance between the offset bounds and the lower bound of the access
1221 size. If the overlap isn't certain return success. */
1222 if (!overlap_certain
1223 && acs.dstoff[0] == acs.dstoff[1]
1224 && acs.srcoff[0] == acs.srcoff[1]
1225 && acs.dstsiz[0] == acs.dstsiz[1]
1226 && acs.srcsiz[0] == acs.srcsiz[1])
1227 return false;
1228
1229 /* Overlap is not certain but may be possible. */
1230
1231 offset_int access_min = acs.dstsiz[0] + acs.srcsiz[0];
1232
1233 /* Determine the conservative (minimum) amount of space. */
1234 space = wi::abs (acs.dstoff[0] - acs.srcoff[0]);
1235 offset_int d = wi::abs (acs.dstoff[0] - acs.srcoff[1]);
1236 if (d < space)
1237 space = d;
1238 d = wi::abs (acs.dstoff[1] - acs.srcoff[0]);
1239 if (d < space)
1240 space = d;
1241
1242 /* For a strict test (used for strcpy and similar with unknown or
1243 variable bounds or sizes), consider the smallest distance between
1244 the offset bounds and either the upper bound of the access size
1245 if known, or the lower bound otherwise. */
1246 if (access_min <= space && (access_min != 0 || !strfunc_unknown_args))
1247 return false;
1248
1249 /* When strcat overlap is certain it is always a single byte:
1250 the terminating NUL, regardless of offsets and sizes. When
1251 overlap is only possible its range is [0, 1]. */
1252 acs.ovlsiz[0] = dstref->sizrange[0] == dstref->sizrange[1] ? 1 : 0;
1253 acs.ovlsiz[1] = 1;
1254
1255 offset_int endoff
1256 = dstref->offrange[0] + (dstref->sizrange[0] - srcref->sizrange[0]);
1257 if (endoff <= srcref->offrange[0])
1258 acs.ovloff[0] = wi::smin (maxobjsize, srcref->offrange[0]).to_shwi ();
1259 else
1260 acs.ovloff[0] = wi::smin (maxobjsize, endoff).to_shwi ();
1261
1262 acs.sizrange[0] = wi::smax (wi::abs (endoff - srcref->offrange[0]) + 1,
1263 srcref->sizrange[0]).to_shwi ();
1264 if (dstref->offrange[0] == dstref->offrange[1])
1265 {
1266 if (srcref->offrange[0] == srcref->offrange[1])
1267 acs.ovloff[1] = acs.ovloff[0];
1268 else
1269 acs.ovloff[1]
1270 = wi::smin (maxobjsize,
1271 srcref->offrange[1] + srcref->sizrange[1]).to_shwi ();
1272 }
1273 else
1274 acs.ovloff[1]
1275 = wi::smin (maxobjsize,
1276 dstref->offrange[1] + dstref->sizrange[1]).to_shwi ();
1277
1278 if (acs.sizrange[0] == 0)
1279 acs.sizrange[0] = 1;
1280 acs.sizrange[1] = wi::smax (acs.dstsiz[1], srcref->sizrange[1]).to_shwi ();
1281 return true;
1282}
1283
1284/* Return true if the strcpy-like access overlaps. */
1285
1286bool
1287builtin_access::strcpy_overlap ()
1288{
1289 return generic_overlap ();
1
Calling 'builtin_access::generic_overlap'
1290}
1291
1292/* For a BASE of array type, clamp REFOFF to at most [0, BASE_SIZE]
1293 if known, or [0, MAXOBJSIZE] otherwise. */
1294
1295static void
1296clamp_offset (tree base, offset_int refoff[2], offset_int maxobjsize)
1297{
1298 if (!base || TREE_CODE (TREE_TYPE (base))((enum tree_code) (((contains_struct_check ((base), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1298, __FUNCTION__))->typed.type))->base.code)
!= ARRAY_TYPE)
1299 return;
1300
1301 if (refoff[0] < 0 && refoff[1] >= 0)
1302 refoff[0] = 0;
1303
1304 if (refoff[1] < refoff[0])
1305 {
1306 offset_int maxsize = maxobjsize;
1307 if (tree size = TYPE_SIZE_UNIT (TREE_TYPE (base))((tree_class_check ((((contains_struct_check ((base), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1307, __FUNCTION__))->typed.type)), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1307, __FUNCTION__))->type_common.size_unit)
)
1308 maxsize = wi::to_offset (size);
1309
1310 refoff[1] = wi::umin (refoff[1], maxsize);
1311 }
1312}
1313
1314/* Return true if DSTREF and SRCREF describe accesses that either overlap
1315 one another or that, in order not to overlap, would imply that the size
1316 of the referenced object(s) exceeds the maximum size of an object.
1317 Otherwise, if DSTREF and SRCREF do not definitely overlap (even though
1318 they may overlap in a way that's not apparent from the available data),
1319 return false. */
1320
1321bool
1322builtin_access::overlap ()
1323{
1324 builtin_access &acs = *this;
1325
1326 const offset_int maxobjsize = dstref->maxobjsize;
1327
1328 acs.sizrange[0] = wi::smax (dstref->sizrange[0],
1329 srcref->sizrange[0]).to_shwi ();
1330 acs.sizrange[1] = wi::smax (dstref->sizrange[1],
1331 srcref->sizrange[1]).to_shwi ();
1332
1333 /* Check to see if the two references refer to regions that are
1334 too large not to overlap in the address space (whose maximum
1335 size is PTRDIFF_MAX). */
1336 offset_int size = dstref->sizrange[0] + srcref->sizrange[0];
1337 if (maxobjsize < size)
1338 {
1339 acs.ovloff[0] = (maxobjsize - dstref->sizrange[0]).to_shwi ();
1340 acs.ovlsiz[0] = (size - maxobjsize).to_shwi ();
1341 return true;
1342 }
1343
1344 /* If both base objects aren't known return the maximum possible
1345 offset that would make them not overlap. */
1346 if (!dstref->base || !srcref->base)
1347 return false;
1348
1349 /* If the base object is an array adjust the bounds of the offset
1350 to be non-negative and within the bounds of the array if possible. */
1351 clamp_offset (dstref->base, acs.dstoff, maxobjsize);
1352
1353 acs.srcoff[0] = srcref->offrange[0];
1354 acs.srcoff[1] = srcref->offrange[1];
1355
1356 clamp_offset (srcref->base, acs.srcoff, maxobjsize);
1357
1358 /* When the upper bound of the offset is less than the lower bound
1359 the former is the result of a negative offset being represented
1360 as a large positive value or vice versa. The resulting range is
1361 a union of two subranges: [MIN, UB] and [LB, MAX]. Since such
1362 a union is not representable using the current data structure
1363 replace it with the full range of offsets. */
1364 if (acs.dstoff[1] < acs.dstoff[0])
1365 {
1366 acs.dstoff[0] = -maxobjsize - 1;
1367 acs.dstoff[1] = maxobjsize;
1368 }
1369
1370 /* Validate the offset and size of each reference on its own first.
1371 This is independent of whether or not the base objects are the
1372 same. Normally, this would have already been detected and
1373 diagnosed by -Warray-bounds, unless it has been disabled. */
1374 offset_int maxoff = acs.dstoff[0] + dstref->sizrange[0];
1375 if (maxobjsize < maxoff)
1376 {
1377 acs.ovlsiz[0] = (maxoff - maxobjsize).to_shwi ();
1378 acs.ovloff[0] = acs.dstoff[0].to_shwi () - acs.ovlsiz[0];
1379 return true;
1380 }
1381
1382 /* Repeat the same as above but for the source offsets. */
1383 if (acs.srcoff[1] < acs.srcoff[0])
1384 {
1385 acs.srcoff[0] = -maxobjsize - 1;
1386 acs.srcoff[1] = maxobjsize;
1387 }
1388
1389 maxoff = acs.srcoff[0] + srcref->sizrange[0];
1390 if (maxobjsize < maxoff)
1391 {
1392 acs.ovlsiz[0] = (maxoff - maxobjsize).to_shwi ();
1393 acs.ovlsiz[1] = (acs.srcoff[0] + srcref->sizrange[1]
1394 - maxobjsize).to_shwi ();
1395 acs.ovloff[0] = acs.srcoff[0].to_shwi () - acs.ovlsiz[0];
1396 return true;
1397 }
1398
1399 if (dstref->base != srcref->base)
1400 return false;
1401
1402 acs.dstsiz[0] = dstref->sizrange[0];
1403 acs.dstsiz[1] = dstref->sizrange[1];
1404
1405 acs.srcsiz[0] = srcref->sizrange[0];
1406 acs.srcsiz[1] = srcref->sizrange[1];
1407
1408 /* Call the appropriate function to determine the overlap. */
1409 if ((this->*detect_overlap) ())
1410 {
1411 if (!sizrange[1])
1412 {
1413 /* Unless the access size range has already been set, do so here. */
1414 sizrange[0] = wi::smax (acs.dstsiz[0], srcref->sizrange[0]).to_shwi ();
1415 sizrange[1] = wi::smax (acs.dstsiz[1], srcref->sizrange[1]).to_shwi ();
1416 }
1417 return true;
1418 }
1419
1420 return false;
1421}
1422
1423/* Attempt to detect and diagnose an overlapping copy in a call expression
1424 EXPR involving an access ACS to a built-in memory or string function.
1425 Return true when one has been detected, false otherwise. */
1426
1427static bool
1428maybe_diag_overlap (location_t loc, gimple *call, builtin_access &acs)
1429{
1430 if (!acs.overlap ())
1431 return false;
1432
1433 if (warning_suppressed_p (call, OPT_Wrestrict))
1434 return true;
1435
1436 /* For convenience. */
1437 const builtin_memref &dstref = *acs.dstref;
1438 const builtin_memref &srcref = *acs.srcref;
1439
1440 /* Determine the range of offsets and sizes of the overlap if it
1441 exists and issue diagnostics. */
1442 HOST_WIDE_INTlong *ovloff = acs.ovloff;
1443 HOST_WIDE_INTlong *ovlsiz = acs.ovlsiz;
1444 HOST_WIDE_INTlong *sizrange = acs.sizrange;
1445
1446 tree func = gimple_call_fndecl (call);
1447
1448 /* To avoid a combinatorial explosion of diagnostics format the offsets
1449 or their ranges as strings and use them in the warning calls below. */
1450 char offstr[3][64];
1451
1452 if (dstref.offrange[0] == dstref.offrange[1]
1453 || dstref.offrange[1] > HOST_WIDE_INT_MAX(~((long) (1UL << (64 - 1)))))
1454 sprintf (offstr[0], HOST_WIDE_INT_PRINT_DEC"%" "l" "d",
1455 dstref.offrange[0].to_shwi ());
1456 else
1457 sprintf (offstr[0],
1458 "[" HOST_WIDE_INT_PRINT_DEC"%" "l" "d" ", " HOST_WIDE_INT_PRINT_DEC"%" "l" "d" "]",
1459 dstref.offrange[0].to_shwi (),
1460 dstref.offrange[1].to_shwi ());
1461
1462 if (srcref.offrange[0] == srcref.offrange[1]
1463 || srcref.offrange[1] > HOST_WIDE_INT_MAX(~((long) (1UL << (64 - 1)))))
1464 sprintf (offstr[1],
1465 HOST_WIDE_INT_PRINT_DEC"%" "l" "d",
1466 srcref.offrange[0].to_shwi ());
1467 else
1468 sprintf (offstr[1],
1469 "[" HOST_WIDE_INT_PRINT_DEC"%" "l" "d" ", " HOST_WIDE_INT_PRINT_DEC"%" "l" "d" "]",
1470 srcref.offrange[0].to_shwi (),
1471 srcref.offrange[1].to_shwi ());
1472
1473 if (ovloff[0] == ovloff[1] || !ovloff[1])
1474 sprintf (offstr[2], HOST_WIDE_INT_PRINT_DEC"%" "l" "d", ovloff[0]);
1475 else
1476 sprintf (offstr[2],
1477 "[" HOST_WIDE_INT_PRINT_DEC"%" "l" "d" ", " HOST_WIDE_INT_PRINT_DEC"%" "l" "d" "]",
1478 ovloff[0], ovloff[1]);
1479
1480 const offset_int maxobjsize = dstref.maxobjsize;
1481 bool must_overlap = ovlsiz[0] > 0;
1482
1483 if (ovlsiz[1] == 0)
1484 ovlsiz[1] = ovlsiz[0];
1485
1486 if (must_overlap)
1487 {
1488 /* Issue definitive "overlaps" diagnostic in this block. */
1489
1490 if (sizrange[0] == sizrange[1])
1491 {
1492 if (ovlsiz[0] == ovlsiz[1])
1493 warning_at (loc, OPT_Wrestrict,
1494 sizrange[0] == 1
1495 ? (ovlsiz[0] == 1
1496 ? G_("%qD accessing %wu byte at offsets %s ""%qD accessing %wu byte at offsets %s " "and %s overlaps %wu byte at offset %s"
1497 "and %s overlaps %wu byte at offset %s")"%qD accessing %wu byte at offsets %s " "and %s overlaps %wu byte at offset %s"
1498 : G_("%qD accessing %wu byte at offsets %s ""%qD accessing %wu byte at offsets %s " "and %s overlaps %wu bytes at offset "
"%s"
1499 "and %s overlaps %wu bytes at offset ""%qD accessing %wu byte at offsets %s " "and %s overlaps %wu bytes at offset "
"%s"
1500 "%s")"%qD accessing %wu byte at offsets %s " "and %s overlaps %wu bytes at offset "
"%s"
)
1501 : (ovlsiz[0] == 1
1502 ? G_("%qD accessing %wu bytes at offsets %s ""%qD accessing %wu bytes at offsets %s " "and %s overlaps %wu byte at offset %s"
1503 "and %s overlaps %wu byte at offset %s")"%qD accessing %wu bytes at offsets %s " "and %s overlaps %wu byte at offset %s"
1504 : G_("%qD accessing %wu bytes at offsets %s ""%qD accessing %wu bytes at offsets %s " "and %s overlaps %wu bytes at offset "
"%s"
1505 "and %s overlaps %wu bytes at offset ""%qD accessing %wu bytes at offsets %s " "and %s overlaps %wu bytes at offset "
"%s"
1506 "%s")"%qD accessing %wu bytes at offsets %s " "and %s overlaps %wu bytes at offset "
"%s"
),
1507 func, sizrange[0],
1508 offstr[0], offstr[1], ovlsiz[0], offstr[2]);
1509 else if (ovlsiz[1] >= 0 && ovlsiz[1] < maxobjsize.to_shwi ())
1510 warning_n (loc, OPT_Wrestrict, sizrange[0],
1511 "%qD accessing %wu byte at offsets %s "
1512 "and %s overlaps between %wu and %wu bytes "
1513 "at offset %s",
1514 "%qD accessing %wu bytes at offsets %s "
1515 "and %s overlaps between %wu and %wu bytes "
1516 "at offset %s",
1517 func, sizrange[0], offstr[0], offstr[1],
1518 ovlsiz[0], ovlsiz[1], offstr[2]);
1519 else
1520 warning_n (loc, OPT_Wrestrict, sizrange[0],
1521 "%qD accessing %wu byte at offsets %s and "
1522 "%s overlaps %wu or more bytes at offset %s",
1523 "%qD accessing %wu bytes at offsets %s and "
1524 "%s overlaps %wu or more bytes at offset %s",
1525 func, sizrange[0],
1526 offstr[0], offstr[1], ovlsiz[0], offstr[2]);
1527 return true;
1528 }
1529
1530 if (sizrange[1] >= 0 && sizrange[1] < maxobjsize.to_shwi ())
1531 {
1532 if (ovlsiz[0] == ovlsiz[1])
1533 warning_n (loc, OPT_Wrestrict, ovlsiz[0],
1534 "%qD accessing between %wu and %wu bytes "
1535 "at offsets %s and %s overlaps %wu byte at "
1536 "offset %s",
1537 "%qD accessing between %wu and %wu bytes "
1538 "at offsets %s and %s overlaps %wu bytes "
1539 "at offset %s",
1540 func, sizrange[0], sizrange[1],
1541 offstr[0], offstr[1], ovlsiz[0], offstr[2]);
1542 else if (ovlsiz[1] >= 0 && ovlsiz[1] < maxobjsize.to_shwi ())
1543 warning_at (loc, OPT_Wrestrict,
1544 "%qD accessing between %wu and %wu bytes at "
1545 "offsets %s and %s overlaps between %wu and %wu "
1546 "bytes at offset %s",
1547 func, sizrange[0], sizrange[1],
1548 offstr[0], offstr[1], ovlsiz[0], ovlsiz[1],
1549 offstr[2]);
1550 else
1551 warning_at (loc, OPT_Wrestrict,
1552 "%qD accessing between %wu and %wu bytes at "
1553 "offsets %s and %s overlaps %wu or more bytes "
1554 "at offset %s",
1555 func, sizrange[0], sizrange[1],
1556 offstr[0], offstr[1], ovlsiz[0], offstr[2]);
1557 return true;
1558 }
1559
1560 if (ovlsiz[0] != ovlsiz[1])
1561 ovlsiz[1] = maxobjsize.to_shwi ();
1562
1563 if (ovlsiz[0] == ovlsiz[1])
1564 warning_n (loc, OPT_Wrestrict, ovlsiz[0],
1565 "%qD accessing %wu or more bytes at offsets "
1566 "%s and %s overlaps %wu byte at offset %s",
1567 "%qD accessing %wu or more bytes at offsets "
1568 "%s and %s overlaps %wu bytes at offset %s",
1569 func, sizrange[0], offstr[0], offstr[1],
1570 ovlsiz[0], offstr[2]);
1571 else if (ovlsiz[1] >= 0 && ovlsiz[1] < maxobjsize.to_shwi ())
1572 warning_at (loc, OPT_Wrestrict,
1573 "%qD accessing %wu or more bytes at offsets %s "
1574 "and %s overlaps between %wu and %wu bytes "
1575 "at offset %s",
1576 func, sizrange[0], offstr[0], offstr[1],
1577 ovlsiz[0], ovlsiz[1], offstr[2]);
1578 else
1579 warning_at (loc, OPT_Wrestrict,
1580 "%qD accessing %wu or more bytes at offsets %s "
1581 "and %s overlaps %wu or more bytes at offset %s",
1582 func, sizrange[0], offstr[0], offstr[1],
1583 ovlsiz[0], offstr[2]);
1584 return true;
1585 }
1586
1587 /* Use more concise wording when one of the offsets is unbounded
1588 to avoid confusing the user with large and mostly meaningless
1589 numbers. */
1590 bool open_range;
1591 if (DECL_P (dstref.base)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (dstref.base)->base.code))] == tcc_declaration)
&& TREE_CODE (TREE_TYPE (dstref.base))((enum tree_code) (((contains_struct_check ((dstref.base), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1591, __FUNCTION__))->typed.type))->base.code)
== ARRAY_TYPE)
1592 open_range = ((dstref.offrange[0] == 0
1593 && dstref.offrange[1] == maxobjsize)
1594 || (srcref.offrange[0] == 0
1595 && srcref.offrange[1] == maxobjsize));
1596 else
1597 open_range = ((dstref.offrange[0] == -maxobjsize - 1
1598 && dstref.offrange[1] == maxobjsize)
1599 || (srcref.offrange[0] == -maxobjsize - 1
1600 && srcref.offrange[1] == maxobjsize));
1601
1602 if (sizrange[0] == sizrange[1] || sizrange[1] == 1)
1603 {
1604 if (ovlsiz[1] == 1)
1605 {
1606 if (open_range)
1607 warning_n (loc, OPT_Wrestrict, sizrange[1],
1608 "%qD accessing %wu byte may overlap "
1609 "%wu byte",
1610 "%qD accessing %wu bytes may overlap "
1611 "%wu byte",
1612 func, sizrange[1], ovlsiz[1]);
1613 else
1614 warning_n (loc, OPT_Wrestrict, sizrange[1],
1615 "%qD accessing %wu byte at offsets %s "
1616 "and %s may overlap %wu byte at offset %s",
1617 "%qD accessing %wu bytes at offsets %s "
1618 "and %s may overlap %wu byte at offset %s",
1619 func, sizrange[1], offstr[0], offstr[1],
1620 ovlsiz[1], offstr[2]);
1621 return true;
1622 }
1623
1624 if (open_range)
1625 warning_n (loc, OPT_Wrestrict, sizrange[1],
1626 "%qD accessing %wu byte may overlap "
1627 "up to %wu bytes",
1628 "%qD accessing %wu bytes may overlap "
1629 "up to %wu bytes",
1630 func, sizrange[1], ovlsiz[1]);
1631 else
1632 warning_n (loc, OPT_Wrestrict, sizrange[1],
1633 "%qD accessing %wu byte at offsets %s and "
1634 "%s may overlap up to %wu bytes at offset %s",
1635 "%qD accessing %wu bytes at offsets %s and "
1636 "%s may overlap up to %wu bytes at offset %s",
1637 func, sizrange[1], offstr[0], offstr[1],
1638 ovlsiz[1], offstr[2]);
1639 return true;
1640 }
1641
1642 if (sizrange[1] >= 0 && sizrange[1] < maxobjsize.to_shwi ())
1643 {
1644 if (open_range)
1645 warning_n (loc, OPT_Wrestrict, ovlsiz[1],
1646 "%qD accessing between %wu and %wu bytes "
1647 "may overlap %wu byte",
1648 "%qD accessing between %wu and %wu bytes "
1649 "may overlap up to %wu bytes",
1650 func, sizrange[0], sizrange[1], ovlsiz[1]);
1651 else
1652 warning_n (loc, OPT_Wrestrict, ovlsiz[1],
1653 "%qD accessing between %wu and %wu bytes "
1654 "at offsets %s and %s may overlap %wu byte "
1655 "at offset %s",
1656 "%qD accessing between %wu and %wu bytes "
1657 "at offsets %s and %s may overlap up to %wu "
1658 "bytes at offset %s",
1659 func, sizrange[0], sizrange[1],
1660 offstr[0], offstr[1], ovlsiz[1], offstr[2]);
1661 return true;
1662 }
1663
1664 warning_n (loc, OPT_Wrestrict, ovlsiz[1],
1665 "%qD accessing %wu or more bytes at offsets %s "
1666 "and %s may overlap %wu byte at offset %s",
1667 "%qD accessing %wu or more bytes at offsets %s "
1668 "and %s may overlap up to %wu bytes at offset %s",
1669 func, sizrange[0], offstr[0], offstr[1],
1670 ovlsiz[1], offstr[2]);
1671
1672 return true;
1673}
1674
1675/* Validate REF size and offsets in an expression passed as an argument
1676 to a CALL to a built-in function FUNC to make sure they are within
1677 the bounds of the referenced object if its size is known, or
1678 PTRDIFF_MAX otherwise. DO_WARN is true when a diagnostic should
1679 be issued, false otherwise.
1680 Both initial values of the offsets and their final value computed
1681 by the function by incrementing the initial value by the size are
1682 validated. Return the warning number if the offsets are not valid
1683 and a diagnostic has been issued, or would have been issued if
1684 DO_WARN had been true, otherwise an invalid warning number. */
1685
1686static opt_code
1687maybe_diag_access_bounds (gimple *call, tree func, int strict,
1688 const builtin_memref &ref, offset_int wroff,
1689 bool do_warn)
1690{
1691 location_t loc = gimple_location (call);
1692 const offset_int maxobjsize = ref.maxobjsize;
1693
1694 /* Check for excessive size first and regardless of warning options
1695 since the result is used to make codegen decisions. */
1696 if (ref.sizrange[0] > maxobjsize)
1697 {
1698 const opt_code opt = OPT_Wstringop_overflow_;
1699 /* Return true without issuing a warning. */
1700 if (!do_warn)
1701 return opt;
1702
1703 if (ref.ref && warning_suppressed_p (ref.ref, OPT_Wstringop_overflow_))
1704 return no_warning;
1705
1706 bool warned = false;
1707 if (warn_stringop_overflowglobal_options.x_warn_stringop_overflow)
1708 {
1709 if (ref.sizrange[0] == ref.sizrange[1])
1710 warned = warning_at (loc, opt,
1711 "%qD specified bound %wu "
1712 "exceeds maximum object size %wu",
1713 func, ref.sizrange[0].to_uhwi (),
1714 maxobjsize.to_uhwi ());
1715 else
1716 warned = warning_at (loc, opt,
1717 "%qD specified bound between %wu and %wu "
1718 "exceeds maximum object size %wu",
1719 func, ref.sizrange[0].to_uhwi (),
1720 ref.sizrange[1].to_uhwi (),
1721 maxobjsize.to_uhwi ());
1722 return warned ? opt : no_warning;
1723 }
1724 }
1725
1726 /* Check for out-bounds pointers regardless of warning options since
1727 the result is used to make codegen decisions. An excessive WROFF
1728 can only come up as a result of an invalid strncat bound and is
1729 diagnosed separately using a more meaningful warning. */
1730 if (maxobjsize < wroff)
1731 wroff = 0;
1732 offset_int ooboff[] = { ref.offrange[0], ref.offrange[1], wroff };
1733 tree oobref = ref.offset_out_of_bounds (strict, ooboff);
1734 if (!oobref)
1735 return no_warning;
1736
1737 const opt_code opt = OPT_Warray_bounds_;
1738 /* Return true without issuing a warning. */
1739 if (!do_warn)
1740 return opt;
1741
1742 if (!warn_array_boundsglobal_options.x_warn_array_bounds)
1743 return no_warning;
1744
1745 if (warning_suppressed_p (ref.ptr, opt)
1746 || (ref.ref && warning_suppressed_p (ref.ref, opt)))
1747 return no_warning;
1748
1749 char rangestr[2][64];
1750 if (ooboff[0] == ooboff[1]
1751 || (ooboff[0] != ref.offrange[0]
1752 && ooboff[0].to_shwi () >= ooboff[1].to_shwi ()))
1753 sprintf (rangestr[0], "%lli", (long long) ooboff[0].to_shwi ());
1754 else
1755 sprintf (rangestr[0], "[%lli, %lli]",
1756 (long long) ooboff[0].to_shwi (),
1757 (long long) ooboff[1].to_shwi ());
1758
1759 bool warned = false;
1760
1761 if (oobref == error_mark_nodeglobal_trees[TI_ERROR_MARK])
1762 {
1763 if (ref.sizrange[0] == ref.sizrange[1])
1764 sprintf (rangestr[1], "%llu",
1765 (unsigned long long) ref.sizrange[0].to_shwi ());
1766 else
1767 sprintf (rangestr[1], "[%lli, %lli]",
1768 (unsigned long long) ref.sizrange[0].to_uhwi (),
1769 (unsigned long long) ref.sizrange[1].to_uhwi ());
1770
1771 tree type;
1772
1773 if (DECL_P (ref.base)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (ref.base)->base.code))] == tcc_declaration)
1774 && TREE_CODE (type = TREE_TYPE (ref.base))((enum tree_code) (type = ((contains_struct_check ((ref.base)
, (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1774, __FUNCTION__))->typed.type))->base.code)
== ARRAY_TYPE)
1775 {
1776 auto_diagnostic_group d;
1777 if (warning_at (loc, opt,
1778 "%qD pointer overflow between offset %s "
1779 "and size %s accessing array %qD with type %qT",
1780 func, rangestr[0], rangestr[1], ref.base, type))
1781 {
1782 inform (DECL_SOURCE_LOCATION (ref.base)((contains_struct_check ((ref.base), (TS_DECL_MINIMAL), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1782, __FUNCTION__))->decl_minimal.locus)
,
1783 "array %qD declared here", ref.base);
1784 warned = true;
1785 }
1786 else
1787 warned = warning_at (loc, opt,
1788 "%qD pointer overflow between offset %s "
1789 "and size %s",
1790 func, rangestr[0], rangestr[1]);
1791 }
1792 else
1793 warned = warning_at (loc, opt,
1794 "%qD pointer overflow between offset %s "
1795 "and size %s",
1796 func, rangestr[0], rangestr[1]);
1797 }
1798 else if (oobref == ref.base)
1799 {
1800 /* True when the offset formed by an access to the reference
1801 is out of bounds, rather than the initial offset wich is
1802 in bounds. This implies access past the end. */
1803 bool form = ooboff[0] != ref.offrange[0];
1804
1805 if (DECL_P (ref.base)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (ref.base)->base.code))] == tcc_declaration)
)
1806 {
1807 auto_diagnostic_group d;
1808 if ((ref.basesize < maxobjsize
1809 && warning_at (loc, opt,
1810 form
1811 ? G_("%qD forming offset %s is out of ""%qD forming offset %s is out of " "the bounds [0, %wu] of object %qD with "
"type %qT"
1812 "the bounds [0, %wu] of object %qD with ""%qD forming offset %s is out of " "the bounds [0, %wu] of object %qD with "
"type %qT"
1813 "type %qT")"%qD forming offset %s is out of " "the bounds [0, %wu] of object %qD with "
"type %qT"
1814 : G_("%qD offset %s is out of the bounds ""%qD offset %s is out of the bounds " "[0, %wu] of object %qD with type %qT"
1815 "[0, %wu] of object %qD with type %qT")"%qD offset %s is out of the bounds " "[0, %wu] of object %qD with type %qT",
1816 func, rangestr[0], ref.basesize.to_uhwi (),
1817 ref.base, TREE_TYPE (ref.base)((contains_struct_check ((ref.base), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1817, __FUNCTION__))->typed.type)
))
1818 || warning_at (loc, opt,
1819 form
1820 ? G_("%qD forming offset %s is out of ""%qD forming offset %s is out of " "the bounds of object %qD with type %qT"
1821 "the bounds of object %qD with type %qT")"%qD forming offset %s is out of " "the bounds of object %qD with type %qT"
1822 : G_("%qD offset %s is out of the bounds ""%qD offset %s is out of the bounds " "of object %qD with type %qT"
1823 "of object %qD with type %qT")"%qD offset %s is out of the bounds " "of object %qD with type %qT",
1824 func, rangestr[0],
1825 ref.base, TREE_TYPE (ref.base)((contains_struct_check ((ref.base), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1825, __FUNCTION__))->typed.type)
))
1826 {
1827 inform (DECL_SOURCE_LOCATION (ref.base)((contains_struct_check ((ref.base), (TS_DECL_MINIMAL), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1827, __FUNCTION__))->decl_minimal.locus)
,
1828 "%qD declared here", ref.base);
1829 warned = true;
1830 }
1831 }
1832 else if (ref.basesize < maxobjsize)
1833 warned = warning_at (loc, opt,
1834 form
1835 ? G_("%qD forming offset %s is out ""%qD forming offset %s is out " "of the bounds [0, %wu]"
1836 "of the bounds [0, %wu]")"%qD forming offset %s is out " "of the bounds [0, %wu]"
1837 : G_("%qD offset %s is out ""%qD offset %s is out " "of the bounds [0, %wu]"
1838 "of the bounds [0, %wu]")"%qD offset %s is out " "of the bounds [0, %wu]",
1839 func, rangestr[0], ref.basesize.to_uhwi ());
1840 else
1841 warned = warning_at (loc, opt,
1842 form
1843 ? G_("%qD forming offset %s is out of bounds")"%qD forming offset %s is out of bounds"
1844 : G_("%qD offset %s is out of bounds")"%qD offset %s is out of bounds",
1845 func, rangestr[0]);
1846 }
1847 else if (TREE_CODE (ref.ref)((enum tree_code) (ref.ref)->base.code) == MEM_REF)
1848 {
1849 tree refop = TREE_OPERAND (ref.ref, 0)(*((const_cast<tree*> (tree_operand_check ((ref.ref), (
0), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1849, __FUNCTION__)))))
;
1850 tree type = TREE_TYPE (refop)((contains_struct_check ((refop), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1850, __FUNCTION__))->typed.type)
;
1851 if (POINTER_TYPE_P (type)(((enum tree_code) (type)->base.code) == POINTER_TYPE || (
(enum tree_code) (type)->base.code) == REFERENCE_TYPE)
)
1852 type = TREE_TYPE (type)((contains_struct_check ((type), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1852, __FUNCTION__))->typed.type)
;
1853 type = TYPE_MAIN_VARIANT (type)((tree_class_check ((type), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1853, __FUNCTION__))->type_common.main_variant)
;
1854
1855 if (warning_at (loc, opt,
1856 "%qD offset %s from the object at %qE is out "
1857 "of the bounds of %qT",
1858 func, rangestr[0], ref.base, type))
1859 {
1860 if (TREE_CODE (ref.ref)((enum tree_code) (ref.ref)->base.code) == COMPONENT_REF)
1861 refop = TREE_OPERAND (ref.ref, 1)(*((const_cast<tree*> (tree_operand_check ((ref.ref), (
1), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1861, __FUNCTION__)))))
;
1862 if (DECL_P (refop)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (refop)->base.code))] == tcc_declaration)
)
1863 inform (DECL_SOURCE_LOCATION (refop)((contains_struct_check ((refop), (TS_DECL_MINIMAL), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1863, __FUNCTION__))->decl_minimal.locus)
,
1864 "subobject %qD declared here", refop);
1865 warned = true;
1866 }
1867 }
1868 else
1869 {
1870 tree refop = TREE_OPERAND (ref.ref, 0)(*((const_cast<tree*> (tree_operand_check ((ref.ref), (
0), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1870, __FUNCTION__)))))
;
1871 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (ref.ref))((tree_class_check ((((contains_struct_check ((ref.ref), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1871, __FUNCTION__))->typed.type)), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1871, __FUNCTION__))->type_common.main_variant)
;
1872
1873 if (warning_at (loc, opt,
1874 "%qD offset %s from the object at %qE is out "
1875 "of the bounds of referenced subobject %qD with "
1876 "type %qT at offset %wi",
1877 func, rangestr[0], ref.base,
1878 TREE_OPERAND (ref.ref, 1)(*((const_cast<tree*> (tree_operand_check ((ref.ref), (
1), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1878, __FUNCTION__)))))
, type,
1879 ref.refoff.to_shwi ()))
1880 {
1881 if (TREE_CODE (ref.ref)((enum tree_code) (ref.ref)->base.code) == COMPONENT_REF)
1882 refop = TREE_OPERAND (ref.ref, 1)(*((const_cast<tree*> (tree_operand_check ((ref.ref), (
1), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1882, __FUNCTION__)))))
;
1883 if (DECL_P (refop)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (refop)->base.code))] == tcc_declaration)
)
1884 inform (DECL_SOURCE_LOCATION (refop)((contains_struct_check ((refop), (TS_DECL_MINIMAL), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1884, __FUNCTION__))->decl_minimal.locus)
,
1885 "subobject %qD declared here", refop);
1886 warned = true;
1887 }
1888 }
1889
1890 return warned ? opt : no_warning;
1891}
1892
1893/* Check a CALL statement for restrict-violations and issue warnings
1894 if/when appropriate. */
1895
1896void
1897pass_wrestrict::check_call (gimple *call)
1898{
1899 /* Avoid checking the call if it has already been diagnosed for
1900 some reason. */
1901 if (warning_suppressed_p (call, OPT_Wrestrict))
1902 return;
1903
1904 tree func = gimple_call_fndecl (call);
1905 if (!func || !fndecl_built_in_p (func, BUILT_IN_NORMAL))
1906 return;
1907
1908 /* Argument number to extract from the call (depends on the built-in
1909 and its kind). */
1910 unsigned dst_idx = -1;
1911 unsigned src_idx = -1;
1912 unsigned bnd_idx = -1;
1913
1914 /* Is this CALL to a string function (as opposed to one to a raw
1915 memory function). */
1916 bool strfun = true;
1917
1918 switch (DECL_FUNCTION_CODE (func))
1919 {
1920 case BUILT_IN_MEMCPY:
1921 case BUILT_IN_MEMCPY_CHK:
1922 case BUILT_IN_MEMPCPY:
1923 case BUILT_IN_MEMPCPY_CHK:
1924 case BUILT_IN_MEMMOVE:
1925 case BUILT_IN_MEMMOVE_CHK:
1926 strfun = false;
1927 /* Fall through. */
1928
1929 case BUILT_IN_STPNCPY:
1930 case BUILT_IN_STPNCPY_CHK:
1931 case BUILT_IN_STRNCAT:
1932 case BUILT_IN_STRNCAT_CHK:
1933 case BUILT_IN_STRNCPY:
1934 case BUILT_IN_STRNCPY_CHK:
1935 dst_idx = 0;
1936 src_idx = 1;
1937 bnd_idx = 2;
1938 break;
1939
1940 case BUILT_IN_MEMSET:
1941 case BUILT_IN_MEMSET_CHK:
1942 dst_idx = 0;
1943 bnd_idx = 2;
1944 break;
1945
1946 case BUILT_IN_STPCPY:
1947 case BUILT_IN_STPCPY_CHK:
1948 case BUILT_IN_STRCPY:
1949 case BUILT_IN_STRCPY_CHK:
1950 case BUILT_IN_STRCAT:
1951 case BUILT_IN_STRCAT_CHK:
1952 dst_idx = 0;
1953 src_idx = 1;
1954 break;
1955
1956 default:
1957 /* Handle other string functions here whose access may need
1958 to be validated for in-bounds offsets and non-overlapping
1959 copies. */
1960 return;
1961 }
1962
1963 unsigned nargs = gimple_call_num_args (call);
1964
1965 tree dst = dst_idx < nargs ? gimple_call_arg (call, dst_idx) : NULL_TREE(tree) __null;
1966 tree src = src_idx < nargs ? gimple_call_arg (call, src_idx) : NULL_TREE(tree) __null;
1967 tree dstwr = bnd_idx < nargs ? gimple_call_arg (call, bnd_idx) : NULL_TREE(tree) __null;
1968
1969 /* For string functions with an unspecified or unknown bound,
1970 assume the size of the access is one. */
1971 if (!dstwr && strfun)
1972 dstwr = size_one_nodeglobal_trees[TI_SIZE_ONE];
1973
1974 /* DST and SRC can be null for a call with an insufficient number
1975 of arguments to a built-in function declared without a protype. */
1976 if (!dst || (src_idx < nargs && !src))
1977 return;
1978
1979 /* DST, SRC, or DSTWR can also have the wrong type in a call to
1980 a function declared without a prototype. Avoid checking such
1981 invalid calls. */
1982 if (TREE_CODE (TREE_TYPE (dst))((enum tree_code) (((contains_struct_check ((dst), (TS_TYPED)
, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1982, __FUNCTION__))->typed.type))->base.code)
!= POINTER_TYPE
1983 || (src && TREE_CODE (TREE_TYPE (src))((enum tree_code) (((contains_struct_check ((src), (TS_TYPED)
, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1983, __FUNCTION__))->typed.type))->base.code)
!= POINTER_TYPE)
1984 || (dstwr && !INTEGRAL_TYPE_P (TREE_TYPE (dstwr))(((enum tree_code) (((contains_struct_check ((dstwr), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1984, __FUNCTION__))->typed.type))->base.code) == ENUMERAL_TYPE
|| ((enum tree_code) (((contains_struct_check ((dstwr), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1984, __FUNCTION__))->typed.type))->base.code) == BOOLEAN_TYPE
|| ((enum tree_code) (((contains_struct_check ((dstwr), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/gimple-ssa-warn-restrict.cc"
, 1984, __FUNCTION__))->typed.type))->base.code) == INTEGER_TYPE
)
))
1985 return;
1986
1987 opt_code opt = check_bounds_or_overlap (m_ptr_qry, call, dst, src, dstwr,
1988 NULL_TREE(tree) __null);
1989 /* Avoid diagnosing the call again. */
1990 suppress_warning (call, opt);
1991}
1992
1993} /* anonymous namespace */
1994
1995/* Attempt to detect and diagnose invalid offset bounds and (except for
1996 memmove) overlapping copy in a call expression EXPR from SRC to DST
1997 and DSTSIZE and SRCSIZE bytes, respectively. Both DSTSIZE and
1998 SRCSIZE may be NULL. DO_WARN is false to detect either problem
1999 without issuing a warning. Return the OPT_Wxxx constant corresponding
2000 to the warning if one has been detected and zero otherwise. */
2001
2002opt_code
2003check_bounds_or_overlap (gimple *call, tree dst, tree src, tree dstsize,
2004 tree srcsize, bool bounds_only /* = false */,
2005 bool do_warn /* = true */)
2006{
2007 pointer_query ptrqry (get_range_query (cfun(cfun + 0)));
2008 return check_bounds_or_overlap (ptrqry,
2009 call, dst, src, dstsize, srcsize,
2010 bounds_only, do_warn);
2011}
2012
2013opt_code
2014check_bounds_or_overlap (pointer_query &ptrqry,
2015 gimple *call, tree dst, tree src, tree dstsize,
2016 tree srcsize, bool bounds_only /* = false */,
2017 bool do_warn /* = true */)
2018{
2019 tree func = gimple_call_fndecl (call);
2020
2021 builtin_memref dstref (ptrqry, call, dst, dstsize);
2022 builtin_memref srcref (ptrqry, call, src, srcsize);
2023
2024 /* Create a descriptor of the access. This may adjust both DSTREF
2025 and SRCREF based on one another and the kind of the access. */
2026 builtin_access acs (ptrqry, call, dstref, srcref);
2027
2028 /* Set STRICT to the value of the -Warray-bounds=N argument for
2029 string functions or when N > 1. */
2030 int strict = (acs.strict () || warn_array_boundsglobal_options.x_warn_array_bounds > 1 ? warn_array_boundsglobal_options.x_warn_array_bounds : 0);
2031
2032 /* The starting offset of the destination write access. Nonzero only
2033 for the strcat family of functions. */
2034 offset_int wroff = acs.write_off (dstsize);
2035
2036 /* Validate offsets to each reference before the access first to make
2037 sure they are within the bounds of the destination object if its
2038 size is known, or PTRDIFF_MAX otherwise. */
2039 opt_code opt
2040 = maybe_diag_access_bounds (call, func, strict, dstref, wroff, do_warn);
2041 if (opt == no_warning)
2042 opt = maybe_diag_access_bounds (call, func, strict, srcref, 0, do_warn);
2043
2044 if (opt != no_warning)
2045 {
2046 if (do_warn)
2047 suppress_warning (call, opt);
2048 return opt;
2049 }
2050
2051 if (!warn_restrictglobal_options.x_warn_restrict || bounds_only || !src)
2052 return no_warning;
2053
2054 if (!bounds_only)
2055 {
2056 switch (DECL_FUNCTION_CODE (func))
2057 {
2058 case BUILT_IN_MEMMOVE:
2059 case BUILT_IN_MEMMOVE_CHK:
2060 case BUILT_IN_MEMSET:
2061 case BUILT_IN_MEMSET_CHK:
2062 return no_warning;
2063 default:
2064 break;
2065 }
2066 }
2067
2068 location_t loc = gimple_location (call);
2069 if (operand_equal_p (dst, src, 0))
2070 {
2071 /* Issue -Wrestrict unless the pointers are null (those do
2072 not point to objects and so do not indicate an overlap;
2073 such calls could be the result of sanitization and jump
2074 threading). */
2075 if (!integer_zerop (dst) && !warning_suppressed_p (call, OPT_Wrestrict))
2076 {
2077 warning_at (loc, OPT_Wrestrict,
2078 "%qD source argument is the same as destination",
2079 func);
2080 suppress_warning (call, OPT_Wrestrict);
2081 return OPT_Wrestrict;
2082 }
2083
2084 return no_warning;
2085 }
2086
2087 /* Return false when overlap has been detected. */
2088 if (maybe_diag_overlap (loc, call, acs))
2089 {
2090 suppress_warning (call, OPT_Wrestrict);
2091 return OPT_Wrestrict;
2092 }
2093
2094 return no_warning;
2095}
2096
2097gimple_opt_pass *
2098make_pass_warn_restrict (gcc::context *ctxt)
2099{
2100 return new pass_wrestrict (ctxt);
2101}
2102
2103DEBUG_FUNCTION__attribute__ ((__used__)) void
2104dump_builtin_memref (FILE *fp, const builtin_memref &ref)
2105{
2106 fprintf (fp, "\n ptr = ");
2107 print_generic_expr (fp, ref.ptr, TDF_LINENO);
2108 fprintf (fp, "\n ref = ");
2109 if (ref.ref)
2110 print_generic_expr (fp, ref.ref, TDF_LINENO);
2111 else
2112 fputs ("null", fp);
2113 fprintf (fp, "\n base = ");
2114 print_generic_expr (fp, ref.base, TDF_LINENO);
2115 fprintf (fp,
2116 "\n basesize = %lli"
2117 "\n refsize = %lli"
2118 "\n refoff = %lli"
2119 "\n offrange = [%lli, %lli]"
2120 "\n sizrange = [%lli, %lli]"
2121 "\n strbounded_p = %s\n",
2122 (long long)ref.basesize.to_shwi (),
2123 (long long)ref.refsize.to_shwi (),
2124 (long long)ref.refoff.to_shwi (),
2125 (long long)ref.offrange[0].to_shwi (),
2126 (long long)ref.offrange[1].to_shwi (),
2127 (long long)ref.sizrange[0].to_shwi (),
2128 (long long)ref.sizrange[1].to_shwi (),
2129 ref.strbounded_p ? "true" : "false");
2130}
2131
2132void
2133builtin_access::dump (FILE *fp) const
2134{
2135 fprintf (fp, " dstref:");
2136 dump_builtin_memref (fp, *dstref);
2137 fprintf (fp, "\n srcref:");
2138 dump_builtin_memref (fp, *srcref);
2139
2140 fprintf (fp,
2141 " sizrange = [%lli, %lli]\n"
2142 " ovloff = [%lli, %lli]\n"
2143 " ovlsiz = [%lli, %lli]\n"
2144 " dstoff = [%lli, %lli]\n"
2145 " dstsiz = [%lli, %lli]\n"
2146 " srcoff = [%lli, %lli]\n"
2147 " srcsiz = [%lli, %lli]\n",
2148 (long long)sizrange[0], (long long)sizrange[1],
2149 (long long)ovloff[0], (long long)ovloff[1],
2150 (long long)ovlsiz[0], (long long)ovlsiz[1],
2151 (long long)dstoff[0].to_shwi (), (long long)dstoff[1].to_shwi (),
2152 (long long)dstsiz[0].to_shwi (), (long long)dstsiz[1].to_shwi (),
2153 (long long)srcoff[0].to_shwi (), (long long)srcoff[1].to_shwi (),
2154 (long long)srcsiz[0].to_shwi (), (long long)srcsiz[1].to_shwi ());
2155}
2156
2157DEBUG_FUNCTION__attribute__ ((__used__)) void
2158dump_builtin_access (FILE *fp, gimple *stmt, const builtin_access &acs)
2159{
2160 if (stmt)
2161 {
2162 fprintf (fp, "\nDumping builtin_access for ");
2163 print_gimple_expr (fp, stmt, TDF_LINENO);
2164 fputs (":\n", fp);
2165 }
2166
2167 acs.dump (fp);
2168}
2169
2170DEBUG_FUNCTION__attribute__ ((__used__)) void
2171debug (gimple *stmt, const builtin_access &acs)
2172{
2173 dump_builtin_access (stdoutstdout, stmt, acs);
2174}

/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.h

1/* Operations with very long integers. -*- C++ -*-
2 Copyright (C) 2012-2023 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it
7under the terms of the GNU General Public License as published by the
8Free Software Foundation; either version 3, or (at your option) any
9later version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT
12ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#ifndef WIDE_INT_H
21#define WIDE_INT_H
22
23/* wide-int.[cc|h] implements a class that efficiently performs
24 mathematical operations on finite precision integers. wide_ints
25 are designed to be transient - they are not for long term storage
26 of values. There is tight integration between wide_ints and the
27 other longer storage GCC representations (rtl and tree).
28
29 The actual precision of a wide_int depends on the flavor. There
30 are three predefined flavors:
31
32 1) wide_int (the default). This flavor does the math in the
33 precision of its input arguments. It is assumed (and checked)
34 that the precisions of the operands and results are consistent.
35 This is the most efficient flavor. It is not possible to examine
36 bits above the precision that has been specified. Because of
37 this, the default flavor has semantics that are simple to
38 understand and in general model the underlying hardware that the
39 compiler is targeted for.
40
41 This flavor must be used at the RTL level of gcc because there
42 is, in general, not enough information in the RTL representation
43 to extend a value beyond the precision specified in the mode.
44
45 This flavor should also be used at the TREE and GIMPLE levels of
46 the compiler except for the circumstances described in the
47 descriptions of the other two flavors.
48
49 The default wide_int representation does not contain any
50 information inherent about signedness of the represented value,
51 so it can be used to represent both signed and unsigned numbers.
52 For operations where the results depend on signedness (full width
53 multiply, division, shifts, comparisons, and operations that need
54 overflow detected), the signedness must be specified separately.
55
56 2) offset_int. This is a fixed-precision integer that can hold
57 any address offset, measured in either bits or bytes, with at
58 least one extra sign bit. At the moment the maximum address
59 size GCC supports is 64 bits. With 8-bit bytes and an extra
60 sign bit, offset_int therefore needs to have at least 68 bits
61 of precision. We round this up to 128 bits for efficiency.
62 Values of type T are converted to this precision by sign- or
63 zero-extending them based on the signedness of T.
64
65 The extra sign bit means that offset_int is effectively a signed
66 128-bit integer, i.e. it behaves like int128_t.
67
68 Since the values are logically signed, there is no need to
69 distinguish between signed and unsigned operations. Sign-sensitive
70 comparison operators <, <=, > and >= are therefore supported.
71 Shift operators << and >> are also supported, with >> being
72 an _arithmetic_ right shift.
73
74 [ Note that, even though offset_int is effectively int128_t,
75 it can still be useful to use unsigned comparisons like
76 wi::leu_p (a, b) as a more efficient short-hand for
77 "a >= 0 && a <= b". ]
78
79 3) widest_int. This representation is an approximation of
80 infinite precision math. However, it is not really infinite
81 precision math as in the GMP library. It is really finite
82 precision math where the precision is 4 times the size of the
83 largest integer that the target port can represent.
84
85 Like offset_int, widest_int is wider than all the values that
86 it needs to represent, so the integers are logically signed.
87 Sign-sensitive comparison operators <, <=, > and >= are supported,
88 as are << and >>.
89
90 There are several places in the GCC where this should/must be used:
91
92 * Code that does induction variable optimizations. This code
93 works with induction variables of many different types at the
94 same time. Because of this, it ends up doing many different
95 calculations where the operands are not compatible types. The
96 widest_int makes this easy, because it provides a field where
97 nothing is lost when converting from any variable,
98
99 * There are a small number of passes that currently use the
100 widest_int that should use the default. These should be
101 changed.
102
103 There are surprising features of offset_int and widest_int
104 that the users should be careful about:
105
106 1) Shifts and rotations are just weird. You have to specify a
107 precision in which the shift or rotate is to happen in. The bits
108 above this precision are zeroed. While this is what you
109 want, it is clearly non obvious.
110
111 2) Larger precision math sometimes does not produce the same
112 answer as would be expected for doing the math at the proper
113 precision. In particular, a multiply followed by a divide will
114 produce a different answer if the first product is larger than
115 what can be represented in the input precision.
116
117 The offset_int and the widest_int flavors are more expensive
118 than the default wide int, so in addition to the caveats with these
119 two, the default is the preferred representation.
120
121 All three flavors of wide_int are represented as a vector of
122 HOST_WIDE_INTs. The default and widest_int vectors contain enough elements
123 to hold a value of MAX_BITSIZE_MODE_ANY_INT bits. offset_int contains only
124 enough elements to hold ADDR_MAX_PRECISION bits. The values are stored
125 in the vector with the least significant HOST_BITS_PER_WIDE_INT bits
126 in element 0.
127
128 The default wide_int contains three fields: the vector (VAL),
129 the precision and a length (LEN). The length is the number of HWIs
130 needed to represent the value. widest_int and offset_int have a
131 constant precision that cannot be changed, so they only store the
132 VAL and LEN fields.
133
134 Since most integers used in a compiler are small values, it is
135 generally profitable to use a representation of the value that is
136 as small as possible. LEN is used to indicate the number of
137 elements of the vector that are in use. The numbers are stored as
138 sign extended numbers as a means of compression. Leading
139 HOST_WIDE_INTs that contain strings of either -1 or 0 are removed
140 as long as they can be reconstructed from the top bit that is being
141 represented.
142
143 The precision and length of a wide_int are always greater than 0.
144 Any bits in a wide_int above the precision are sign-extended from the
145 most significant bit. For example, a 4-bit value 0x8 is represented as
146 VAL = { 0xf...fff8 }. However, as an optimization, we allow other integer
147 constants to be represented with undefined bits above the precision.
148 This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN,
149 so that the INTEGER_CST representation can be used both in TYPE_PRECISION
150 and in wider precisions.
151
152 There are constructors to create the various forms of wide_int from
153 trees, rtl and constants. For trees the options are:
154
155 tree t = ...;
156 wi::to_wide (t) // Treat T as a wide_int
157 wi::to_offset (t) // Treat T as an offset_int
158 wi::to_widest (t) // Treat T as a widest_int
159
160 All three are light-weight accessors that should have no overhead
161 in release builds. If it is useful for readability reasons to
162 store the result in a temporary variable, the preferred method is:
163
164 wi::tree_to_wide_ref twide = wi::to_wide (t);
165 wi::tree_to_offset_ref toffset = wi::to_offset (t);
166 wi::tree_to_widest_ref twidest = wi::to_widest (t);
167
168 To make an rtx into a wide_int, you have to pair it with a mode.
169 The canonical way to do this is with rtx_mode_t as in:
170
171 rtx r = ...
172 wide_int x = rtx_mode_t (r, mode);
173
174 Similarly, a wide_int can only be constructed from a host value if
175 the target precision is given explicitly, such as in:
176
177 wide_int x = wi::shwi (c, prec); // sign-extend C if necessary
178 wide_int y = wi::uhwi (c, prec); // zero-extend C if necessary
179
180 However, offset_int and widest_int have an inherent precision and so
181 can be initialized directly from a host value:
182
183 offset_int x = (int) c; // sign-extend C
184 widest_int x = (unsigned int) c; // zero-extend C
185
186 It is also possible to do arithmetic directly on rtx_mode_ts and
187 constants. For example:
188
189 wi::add (r1, r2); // add equal-sized rtx_mode_ts r1 and r2
190 wi::add (r1, 1); // add 1 to rtx_mode_t r1
191 wi::lshift (1, 100); // 1 << 100 as a widest_int
192
193 Many binary operations place restrictions on the combinations of inputs,
194 using the following rules:
195
196 - {rtx, wide_int} op {rtx, wide_int} -> wide_int
197 The inputs must be the same precision. The result is a wide_int
198 of the same precision
199
200 - {rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
201 (un)signed HOST_WIDE_INT op {rtx, wide_int} -> wide_int
202 The HOST_WIDE_INT is extended or truncated to the precision of
203 the other input. The result is a wide_int of the same precision
204 as that input.
205
206 - (un)signed HOST_WIDE_INT op (un)signed HOST_WIDE_INT -> widest_int
207 The inputs are extended to widest_int precision and produce a
208 widest_int result.
209
210 - offset_int op offset_int -> offset_int
211 offset_int op (un)signed HOST_WIDE_INT -> offset_int
212 (un)signed HOST_WIDE_INT op offset_int -> offset_int
213
214 - widest_int op widest_int -> widest_int
215 widest_int op (un)signed HOST_WIDE_INT -> widest_int
216 (un)signed HOST_WIDE_INT op widest_int -> widest_int
217
218 Other combinations like:
219
220 - widest_int op offset_int and
221 - wide_int op offset_int
222
223 are not allowed. The inputs should instead be extended or truncated
224 so that they match.
225
226 The inputs to comparison functions like wi::eq_p and wi::lts_p
227 follow the same compatibility rules, although their return types
228 are different. Unary functions on X produce the same result as
229 a binary operation X + X. Shift functions X op Y also produce
230 the same result as X + X; the precision of the shift amount Y
231 can be arbitrarily different from X. */
232
233/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
234 early examination of the target's mode file. The WIDE_INT_MAX_ELTS
235 can accommodate at least 1 more bit so that unsigned numbers of that
236 mode can be represented as a signed value. Note that it is still
237 possible to create fixed_wide_ints that have precisions greater than
238 MAX_BITSIZE_MODE_ANY_INT. This can be useful when representing a
239 double-width multiplication result, for example. */
240#define WIDE_INT_MAX_ELTS(((64*(8)) + 64) / 64) \
241 ((MAX_BITSIZE_MODE_ANY_INT(64*(8)) + HOST_BITS_PER_WIDE_INT64) / HOST_BITS_PER_WIDE_INT64)
242
243#define WIDE_INT_MAX_PRECISION((((64*(8)) + 64) / 64) * 64) (WIDE_INT_MAX_ELTS(((64*(8)) + 64) / 64) * HOST_BITS_PER_WIDE_INT64)
244
245/* This is the max size of any pointer on any machine. It does not
246 seem to be as easy to sniff this out of the machine description as
247 it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
248 multiple address sizes and may have different address sizes for
249 different address spaces. However, currently the largest pointer
250 on any platform is 64 bits. When that changes, then it is likely
251 that a target hook should be defined so that targets can make this
252 value larger for those targets. */
253#define ADDR_MAX_BITSIZE64 64
254
255/* This is the internal precision used when doing any address
256 arithmetic. The '4' is really 3 + 1. Three of the bits are for
257 the number of extra bits needed to do bit addresses and the other bit
258 is to allow everything to be signed without losing any precision.
259 Then everything is rounded up to the next HWI for efficiency. */
260#define ADDR_MAX_PRECISION((64 + 4 + 64 - 1) & ~(64 - 1)) \
261 ((ADDR_MAX_BITSIZE64 + 4 + HOST_BITS_PER_WIDE_INT64 - 1) \
262 & ~(HOST_BITS_PER_WIDE_INT64 - 1))
263
264/* The number of HWIs needed to store an offset_int. */
265#define OFFSET_INT_ELTS(((64 + 4 + 64 - 1) & ~(64 - 1)) / 64) (ADDR_MAX_PRECISION((64 + 4 + 64 - 1) & ~(64 - 1)) / HOST_BITS_PER_WIDE_INT64)
266
267/* The type of result produced by a binary operation on types T1 and T2.
268 Defined purely for brevity. */
269#define WI_BINARY_RESULT(T1, T2)typename wi::binary_traits <T1, T2>::result_type \
270 typename wi::binary_traits <T1, T2>::result_type
271
272/* Likewise for binary operators, which excludes the case in which neither
273 T1 nor T2 is a wide-int-based type. */
274#define WI_BINARY_OPERATOR_RESULT(T1, T2)typename wi::binary_traits <T1, T2>::operator_result \
275 typename wi::binary_traits <T1, T2>::operator_result
276
277/* The type of result produced by T1 << T2. Leads to substitution failure
278 if the operation isn't supported. Defined purely for brevity. */
279#define WI_SIGNED_SHIFT_RESULT(T1, T2)typename wi::binary_traits <T1, T2>::signed_shift_result_type \
280 typename wi::binary_traits <T1, T2>::signed_shift_result_type
281
282/* The type of result produced by a sign-agnostic binary predicate on
283 types T1 and T2. This is bool if wide-int operations make sense for
284 T1 and T2 and leads to substitution failure otherwise. */
285#define WI_BINARY_PREDICATE_RESULT(T1, T2)typename wi::binary_traits <T1, T2>::predicate_result \
286 typename wi::binary_traits <T1, T2>::predicate_result
287
288/* The type of result produced by a signed binary predicate on types T1 and T2.
289 This is bool if signed comparisons make sense for T1 and T2 and leads to
290 substitution failure otherwise. */
291#define WI_SIGNED_BINARY_PREDICATE_RESULT(T1, T2)typename wi::binary_traits <T1, T2>::signed_predicate_result \
292 typename wi::binary_traits <T1, T2>::signed_predicate_result
293
294/* The type of result produced by a unary operation on type T. */
295#define WI_UNARY_RESULT(T)typename wi::binary_traits <T, T>::result_type \
296 typename wi::binary_traits <T, T>::result_type
297
298/* Define a variable RESULT to hold the result of a binary operation on
299 X and Y, which have types T1 and T2 respectively. Define VAL to
300 point to the blocks of RESULT. Once the user of the macro has
301 filled in VAL, it should call RESULT.set_len to set the number
302 of initialized blocks. */
303#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y)typename wi::binary_traits <T1, T2>::result_type RESULT
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (X, Y); long *VAL = RESULT
.write_val ()
\
304 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type RESULT = \
305 wi::int_traits <WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type>::get_binary_result (X, Y); \
306 HOST_WIDE_INTlong *VAL = RESULT.write_val ()
307
308/* Similar for the result of a unary operation on X, which has type T. */
309#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X)typename wi::binary_traits <T, T>::result_type RESULT =
wi::int_traits <typename wi::binary_traits <T, T>::
result_type>::get_binary_result (X, X); long *VAL = RESULT
.write_val ()
\
310 WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type RESULT = \
311 wi::int_traits <WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type>::get_binary_result (X, X); \
312 HOST_WIDE_INTlong *VAL = RESULT.write_val ()
313
314template <typename T> class generic_wide_int;
315template <int N> class fixed_wide_int_storage;
316class wide_int_storage;
317
318/* An N-bit integer. Until we can use typedef templates, use this instead. */
319#define FIXED_WIDE_INT(N)generic_wide_int < fixed_wide_int_storage <N> > \
320 generic_wide_int < fixed_wide_int_storage <N> >
321
322typedef generic_wide_int <wide_int_storage> wide_int;
323typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION)generic_wide_int < fixed_wide_int_storage <((64 + 4 + 64
- 1) & ~(64 - 1))> >
offset_int;
324typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION)generic_wide_int < fixed_wide_int_storage <((((64*(8)) +
64) / 64) * 64)> >
widest_int;
325/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
326 so as not to confuse gengtype. */
327typedef generic_wide_int < fixed_wide_int_storage <WIDE_INT_MAX_PRECISION((((64*(8)) + 64) / 64) * 64) * 2> > widest2_int;
328
329/* wi::storage_ref can be a reference to a primitive type,
330 so this is the conservatively-correct setting. */
331template <bool SE, bool HDP = true>
332class wide_int_ref_storage;
333
334typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
335
336/* This can be used instead of wide_int_ref if the referenced value is
337 known to have type T. It carries across properties of T's representation,
338 such as whether excess upper bits in a HWI are defined, and can therefore
339 help avoid redundant work.
340
341 The macro could be replaced with a template typedef, once we're able
342 to use those. */
343#define WIDE_INT_REF_FOR(T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
\
344 generic_wide_int \
345 <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended, \
346 wi::int_traits <T>::host_dependent_precision> >
347
348namespace wi
349{
 /* NOTE(review): this namespace block contains only type/trait machinery
 (enums and binary_traits specializations); no executable code. The
 OVF_* values give the direction of an overflow when it is known and
 OVF_UNKNOWN when only its existence is known. */
 350 /* Operations that calculate overflow do so even for
 351 TYPE_OVERFLOW_WRAPS types. For example, adding 1 to +MAX_INT in
 352 an unsigned int is 0 and does not overflow in C/C++, but wi::add
 353 will set the overflow argument in case it's needed for further
 354 analysis.
 355
 356 For operations that require overflow, these are the different
 357 types of overflow. */
 358 enum overflow_type {
 359 OVF_NONE = 0,
 360 OVF_UNDERFLOW = -1,
 361 OVF_OVERFLOW = 1,
 362 /* There was an overflow, but we are unsure whether it was an
 363 overflow or an underflow. */
 364 OVF_UNKNOWN = 2
 365 };
 366
 367 /* Classifies an integer based on its precision. */
 368 enum precision_type {
 369 /* The integer has both a precision and defined signedness. This allows
 370 the integer to be converted to any width, since we know whether to fill
 371 any extra bits with zeros or signs. */
 372 FLEXIBLE_PRECISION,
 373
 374 /* The integer has a variable precision but no defined signedness. */
 375 VAR_PRECISION,
 376
 377 /* The integer has a constant precision (known at GCC compile time)
 378 and is signed. */
 379 CONST_PRECISION
 380 };
 381
 382 /* This class, which has no default implementation, is expected to
 383 provide the following members:
 384
 385 static const enum precision_type precision_type;
 386 Classifies the type of T.
 387
 388 static const unsigned int precision;
 389 Only defined if precision_type == CONST_PRECISION. Specifies the
 390 precision of all integers of type T.
 391
 392 static const bool host_dependent_precision;
 393 True if the precision of T depends (or can depend) on the host.
 394
 395 static unsigned int get_precision (const T &x)
 396 Return the number of bits in X.
 397
 398 static wi::storage_ref *decompose (HOST_WIDE_INT *scratch,
 399 unsigned int precision, const T &x)
 400 Decompose X as a PRECISION-bit integer, returning the associated
 401 wi::storage_ref. SCRATCH is available as scratch space if needed.
 402 The routine should assert that PRECISION is acceptable. */
 403 template <typename T> struct int_traits;
 404
 405 /* This class provides a single type, result_type, which specifies the
 406 type of integer produced by a binary operation whose inputs have
 407 types T1 and T2. The definition should be symmetric. */
 408 template <typename T1, typename T2,
 409 enum precision_type P1 = int_traits <T1>::precision_type,
 410 enum precision_type P2 = int_traits <T2>::precision_type>
 411 struct binary_traits;
 412
 413 /* Specify the result type for each supported combination of binary
 414 inputs. Note that CONST_PRECISION and VAR_PRECISION cannot be
 415 mixed, in order to give stronger type checking. When both inputs
 416 are CONST_PRECISION, they must have the same precision. */
 417 template <typename T1, typename T2>
 418 struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
 419 {
 420 typedef widest_int result_type;
 421 /* Don't define operators for this combination. */
 422 };
 423
 424 template <typename T1, typename T2>
 425 struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
 426 {
 427 typedef wide_int result_type;
 428 typedef result_type operator_result;
 429 typedef bool predicate_result;
 430 };
 431
 432 template <typename T1, typename T2>
 433 struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
 434 {
 435 /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
 436 so as not to confuse gengtype. */
 437 typedef generic_wide_int < fixed_wide_int_storage
 438 <int_traits <T2>::precision> > result_type;
 439 typedef result_type operator_result;
 440 typedef bool predicate_result;
 441 typedef result_type signed_shift_result_type;
 442 typedef bool signed_predicate_result;
 443 };
 444
 445 template <typename T1, typename T2>
 446 struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
 447 {
 448 typedef wide_int result_type;
 449 typedef result_type operator_result;
 450 typedef bool predicate_result;
 451 };
 452
 453 template <typename T1, typename T2>
 454 struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
 455 {
 456 /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
 457 so as not to confuse gengtype. */
 458 typedef generic_wide_int < fixed_wide_int_storage
 459 <int_traits <T1>::precision> > result_type;
 460 typedef result_type operator_result;
 461 typedef bool predicate_result;
 462 typedef result_type signed_shift_result_type;
 463 typedef bool signed_predicate_result;
 464 };
 465
 466 template <typename T1, typename T2>
 467 struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
 468 {
 /* Mixing two CONST_PRECISION inputs is only allowed when their
 precisions agree, enforced at compile time below. */
 469 STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision)static_assert ((int_traits <T1>::precision == int_traits
 <T2>::precision), "int_traits <T1>::precision == int_traits <T2>::precision"
 )
 ;
 470 /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
 471 so as not to confuse gengtype. */
 472 typedef generic_wide_int < fixed_wide_int_storage
 473 <int_traits <T1>::precision> > result_type;
 474 typedef result_type operator_result;
 475 typedef bool predicate_result;
 476 typedef result_type signed_shift_result_type;
 477 typedef bool signed_predicate_result;
 478 };
 479
 480 template <typename T1, typename T2>
 481 struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
 482 {
 483 typedef wide_int result_type;
 484 typedef result_type operator_result;
 485 typedef bool predicate_result;
 486 };
487}
488
489/* Public functions for querying and operating on integers. */
490namespace wi
491{
 /* NOTE(review): declarations only -- the definitions appear later in
 wide-int.h. The UNARY_/BINARY_/SHIFT_ macros below are local
 shorthand for the template headers and are #undef'd at the end of
 this block. */
 492 template <typename T>
 493 unsigned int get_precision (const T &);
 494
 495 template <typename T1, typename T2>
 496 unsigned int get_binary_precision (const T1 &, const T2 &);
 497
 498 template <typename T1, typename T2>
 499 void copy (T1 &, const T2 &);
 500
 501#define UNARY_PREDICATE \
 502 template <typename T> bool
 503#define UNARY_FUNCTION \
 504 template <typename T> WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
 505#define BINARY_PREDICATE \
 506 template <typename T1, typename T2> bool
 507#define BINARY_FUNCTION \
 508 template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
 509#define SHIFT_FUNCTION \
 510 template <typename T1, typename T2> WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type
 511
 512 UNARY_PREDICATE fits_shwi_p (const T &);
 513 UNARY_PREDICATE fits_uhwi_p (const T &);
 514 UNARY_PREDICATE neg_p (const T &, signop = SIGNED);
 515
 516 template <typename T>
 517 HOST_WIDE_INTlong sign_mask (const T &);
 518
 519 BINARY_PREDICATE eq_p (const T1 &, const T2 &);
 520 BINARY_PREDICATE ne_p (const T1 &, const T2 &);
 521 BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop);
 522 BINARY_PREDICATE lts_p (const T1 &, const T2 &);
 523 BINARY_PREDICATE ltu_p (const T1 &, const T2 &);
 524 BINARY_PREDICATE le_p (const T1 &, const T2 &, signop);
 525 BINARY_PREDICATE les_p (const T1 &, const T2 &);
 526 BINARY_PREDICATE leu_p (const T1 &, const T2 &);
 527 BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop);
 528 BINARY_PREDICATE gts_p (const T1 &, const T2 &);
 529 BINARY_PREDICATE gtu_p (const T1 &, const T2 &);
 530 BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop);
 531 BINARY_PREDICATE ges_p (const T1 &, const T2 &);
 532 BINARY_PREDICATE geu_p (const T1 &, const T2 &);
 533
 534 template <typename T1, typename T2>
 535 int cmp (const T1 &, const T2 &, signop);
 536
 537 template <typename T1, typename T2>
 538 int cmps (const T1 &, const T2 &);
 539
 540 template <typename T1, typename T2>
 541 int cmpu (const T1 &, const T2 &);
 542
 543 UNARY_FUNCTION bit_not (const T &);
 544 UNARY_FUNCTION neg (const T &);
 545 UNARY_FUNCTION neg (const T &, overflow_type *);
 546 UNARY_FUNCTION abs (const T &);
 547 UNARY_FUNCTION ext (const T &, unsigned int, signop);
 548 UNARY_FUNCTION sext (const T &, unsigned int);
 549 UNARY_FUNCTION zext (const T &, unsigned int);
 550 UNARY_FUNCTION set_bit (const T &, unsigned int);
 551
 552 BINARY_FUNCTION min (const T1 &, const T2 &, signop);
 553 BINARY_FUNCTION smin (const T1 &, const T2 &);
 554 BINARY_FUNCTION umin (const T1 &, const T2 &);
 555 BINARY_FUNCTION max (const T1 &, const T2 &, signop);
 556 BINARY_FUNCTION smax (const T1 &, const T2 &);
 557 BINARY_FUNCTION umax (const T1 &, const T2 &);
 558
 559 BINARY_FUNCTION bit_and (const T1 &, const T2 &);
 560 BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
 561 BINARY_FUNCTION bit_or (const T1 &, const T2 &);
 562 BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
 563 BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
 564 BINARY_FUNCTION add (const T1 &, const T2 &);
 565 BINARY_FUNCTION add (const T1 &, const T2 &, signop, overflow_type *);
 566 BINARY_FUNCTION sub (const T1 &, const T2 &);
 567 BINARY_FUNCTION sub (const T1 &, const T2 &, signop, overflow_type *);
 568 BINARY_FUNCTION mul (const T1 &, const T2 &);
 569 BINARY_FUNCTION mul (const T1 &, const T2 &, signop, overflow_type *);
 570 BINARY_FUNCTION smul (const T1 &, const T2 &, overflow_type *);
 571 BINARY_FUNCTION umul (const T1 &, const T2 &, overflow_type *);
 572 BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
 573 BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop,
 574 overflow_type * = 0);
 575 BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
 576 BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
 577 BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop,
 578 overflow_type * = 0);
 579 BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
 580 BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
 581 BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop,
 582 overflow_type * = 0);
 583 BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &);
 584 BINARY_FUNCTION div_round (const T1 &, const T2 &, signop,
 585 overflow_type * = 0);
 586 BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
 587 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type *);
 588 BINARY_FUNCTION gcd (const T1 &, const T2 &, signop = UNSIGNED);
 589 BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop,
 590 overflow_type * = 0);
 591 BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
 592 BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
 593 BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop,
 594 overflow_type * = 0);
 595 BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
 596 BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop,
 597 overflow_type * = 0);
 598 BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop,
 599 overflow_type * = 0);
 600
 601 template <typename T1, typename T2>
 602 bool multiple_of_p (const T1 &, const T2 &, signop);
 603
 604 template <typename T1, typename T2>
 605 bool multiple_of_p (const T1 &, const T2 &, signop,
 606 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type *);
 607
 608 SHIFT_FUNCTION lshift (const T1 &, const T2 &);
 609 SHIFT_FUNCTION lrshift (const T1 &, const T2 &);
 610 SHIFT_FUNCTION arshift (const T1 &, const T2 &);
 611 SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn);
 612 SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int = 0);
 613 SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int = 0);
 614
 615#undef SHIFT_FUNCTION
 616#undef BINARY_PREDICATE
 617#undef BINARY_FUNCTION
 618#undef UNARY_PREDICATE
 619#undef UNARY_FUNCTION
 620
 621 bool only_sign_bit_p (const wide_int_ref &, unsigned int);
 622 bool only_sign_bit_p (const wide_int_ref &);
 623 int clz (const wide_int_ref &);
 624 int clrsb (const wide_int_ref &);
 625 int ctz (const wide_int_ref &);
 626 int exact_log2 (const wide_int_ref &);
 627 int floor_log2 (const wide_int_ref &);
 628 int ffs (const wide_int_ref &);
 629 int popcount (const wide_int_ref &);
 630 int parity (const wide_int_ref &);
 631
 632 template <typename T>
 633 unsigned HOST_WIDE_INTlong extract_uhwi (const T &, unsigned int, unsigned int);
 634
 635 template <typename T>
 636 unsigned int min_precision (const T &, signop);
 637
 638 static inline void accumulate_overflow (overflow_type &, overflow_type);
639}
640
641namespace wi
642{
 643 /* Contains the components of a decomposed integer for easy, direct
 644 access. */
 645 class storage_ref
 646 {
 647 public:
 /* NOTE(review): the default constructor deliberately writes none of
 val/len/precision -- callers must fill all three before the object
 is read. The analyzer's "garbage value" trace for this translation
 unit flows through objects left in this state. */
 648 storage_ref () {}
 649 storage_ref (const HOST_WIDE_INTlong *, unsigned int, unsigned int);
 650
 /* Non-owning view: VAL points at storage that must outlive this
 reference; LEN is the number of HWI blocks, PRECISION the bit
 width. */
 651 const HOST_WIDE_INTlong *val;
 652 unsigned int len;
 653 unsigned int precision;
 654
 655 /* Provide enough trappings for this class to act as storage for
 656 generic_wide_int. */
 657 unsigned int get_len () const;
 658 unsigned int get_precision () const;
 659 const HOST_WIDE_INTlong *get_val () const;
 660 };
661}
662
/* Wrap an existing HWI array. VAL_IN is stored, not copied, so it must
 outlive the storage_ref. */
663inline::wi::storage_ref::storage_ref (const HOST_WIDE_INTlong *val_in,
 664 unsigned int len_in,
 665 unsigned int precision_in)
 666 : val (val_in), len (len_in), precision (precision_in)
667{
668}
669
/* Return the number of explicitly-encoded HWI blocks in VAL. */
670inline unsigned int
671wi::storage_ref::get_len () const
672{
 673 return len;
674}
675
/* Return the precision (bit width) of the referenced integer. */
676inline unsigned int
677wi::storage_ref::get_precision () const
678{
 679 return precision;
680}
681
/* Return the (non-owning) pointer to the HWI block array. */
682inline const HOST_WIDE_INTlong *
683wi::storage_ref::get_val () const
684{
 685 return val;
686}
687
688/* This class defines an integer type using the storage provided by the
 689 template argument. The storage class must provide the following
 690 functions:
 691
 692 unsigned int get_precision () const
 693 Return the number of bits in the integer.
 694
 695 HOST_WIDE_INT *get_val () const
 696 Return a pointer to the array of blocks that encodes the integer.
 697
 698 unsigned int get_len () const
 699 Return the number of blocks in get_val (). If this is smaller
 700 than the number of blocks implied by get_precision (), the
 701 remaining blocks are sign extensions of block get_len () - 1.
 702
 703 Although not required by generic_wide_int itself, writable storage
 704 classes can also provide the following functions:
 705
 706 HOST_WIDE_INT *write_val ()
 707 Get a modifiable version of get_val ()
 708
 709 unsigned int set_len (unsigned int len)
 710 Set the value returned by get_len () to LEN. */
711template <typename storage>
712class GTY(()) generic_wide_int : public storage
713{
714public:
 /* NOTE(review): the default constructor (defined below) performs no
 initialization of the underlying storage; the interleaved analyzer
 trace lines record the resulting uninitialized state. */
 715 generic_wide_int ();
30
Calling default constructor for 'fixed_wide_int_storage<128>'
32
Returning from default constructor for 'fixed_wide_int_storage<128>'
50
Calling default constructor for 'fixed_wide_int_storage<128>'
52
Returning from default constructor for 'fixed_wide_int_storage<128>'
 716
 717 template <typename T>
 718 generic_wide_int (const T &);
 719
 720 template <typename T>
 721 generic_wide_int (const T &, unsigned int);
 722
 723 /* Conversions. */
 724 HOST_WIDE_INTlong to_shwi (unsigned int) const;
 725 HOST_WIDE_INTlong to_shwi () const;
 726 unsigned HOST_WIDE_INTlong to_uhwi (unsigned int) const;
 727 unsigned HOST_WIDE_INTlong to_uhwi () const;
 728 HOST_WIDE_INTlong to_short_addr () const;
 729
 730 /* Public accessors for the interior of a wide int. */
 731 HOST_WIDE_INTlong sign_mask () const;
 732 HOST_WIDE_INTlong elt (unsigned int) const;
 733 HOST_WIDE_INTlong sext_elt (unsigned int) const;
 734 unsigned HOST_WIDE_INTlong ulow () const;
 735 unsigned HOST_WIDE_INTlong uhigh () const;
 736 HOST_WIDE_INTlong slow () const;
 737 HOST_WIDE_INTlong shigh () const;
 738
 739 template <typename T>
 740 generic_wide_int &operator = (const T &);
 741
 /* Compound-assignment operators, each defined as "*this = wi::F (*this, c)". */
 742#define ASSIGNMENT_OPERATOR(OP, F) \
 743 template <typename T> \
 744 generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
 745
 746/* Restrict these to cases where the shift operator is defined. */
 747#define SHIFT_ASSIGNMENT_OPERATOR(OP, OP2) \
 748 template <typename T> \
 749 generic_wide_int &OP (const T &c) { return (*this = *this OP2 c); }
 750
 751#define INCDEC_OPERATOR(OP, DELTA) \
 752 generic_wide_int &OP () { *this += DELTA; return *this; }
 753
 754 ASSIGNMENT_OPERATOR (operator &=, bit_and)
 755 ASSIGNMENT_OPERATOR (operator |=, bit_or)
 756 ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
 757 ASSIGNMENT_OPERATOR (operator +=, add)
 758 ASSIGNMENT_OPERATOR (operator -=, sub)
 759 ASSIGNMENT_OPERATOR (operator *=, mul)
 760 ASSIGNMENT_OPERATOR (operator <<=, lshift)
 761 SHIFT_ASSIGNMENT_OPERATOR (operator >>=, >>)
 762 INCDEC_OPERATOR (operator ++, 1)
 763 INCDEC_OPERATOR (operator --, -1)
 764
 765#undef SHIFT_ASSIGNMENT_OPERATOR
 766#undef ASSIGNMENT_OPERATOR
 767#undef INCDEC_OPERATOR
 768
 769 /* Debugging functions. */
 770 void dump () const;
 771
 772 static const bool is_sign_extended
 773 = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
774};
775
/* Default constructor: intentionally empty, so neither the value blocks
 nor the length of the underlying storage are written. This is the
 origin of the analyzer's "Undefined or garbage value returned to
 caller" report (wide-int.h:1283): a default-constructed object must
 be assigned before it is read, as the trace lines below show. */
776template <typename storage>
777inline generic_wide_int <storage>::generic_wide_int () {}
33
Returning without writing to 'this->len'
53
Returning without writing to 'this->len'
778
/* Construct from X by forwarding to the storage class's converting
 constructor. */
779template <typename storage>
780template <typename T>
781inline generic_wide_int <storage>::generic_wide_int (const T &x)
 782 : storage (x)
783{
784}
785
/* Construct from X with an explicit PRECISION, forwarding both to the
 storage class's (value, precision) constructor. */
786template <typename storage>
787template <typename T>
788inline generic_wide_int <storage>::generic_wide_int (const T &x,
 789 unsigned int precision)
 790 : storage (x, precision)
70
Calling constructor for 'wide_int_ref_storage<true, false>'
791{
792}
793
794/* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION.
 795 If THIS does not fit in PRECISION, the information is lost. */
796template <typename storage>
797inline HOST_WIDE_INTlong
798generic_wide_int <storage>::to_shwi (unsigned int precision) const
799{
 /* Only block 0 is consulted; any higher blocks are discarded. */
 800 if (precision < HOST_BITS_PER_WIDE_INT64)
 801 return sext_hwi (this->get_val ()[0], precision);
 802 else
 803 return this->get_val ()[0];
804}
805
806/* Return THIS as a signed HOST_WIDE_INT, in its natural precision. */
807template <typename storage>
808inline HOST_WIDE_INTlong
809generic_wide_int <storage>::to_shwi () const
810{
 /* A sign-extended representation lets block 0 be returned as-is;
 otherwise delegate to the extending overload. */
 811 if (is_sign_extended)
 812 return this->get_val ()[0];
 813 else
 814 return to_shwi (this->get_precision ());
815}
816
817/* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from
 818 PRECISION. If THIS does not fit in PRECISION, the information
 819 is lost. */
820template <typename storage>
821inline unsigned HOST_WIDE_INTlong
822generic_wide_int <storage>::to_uhwi (unsigned int precision) const
823{
 /* Only block 0 is consulted; any higher blocks are discarded. */
 824 if (precision < HOST_BITS_PER_WIDE_INT64)
 825 return zext_hwi (this->get_val ()[0], precision);
 826 else
 827 return this->get_val ()[0];
828}
829
830/* Return THIS as an unsigned HOST_WIDE_INT, in its natural precision. */
831template <typename storage>
832inline unsigned HOST_WIDE_INTlong
833generic_wide_int <storage>::to_uhwi () const
834{
 835 return to_uhwi (this->get_precision ());
836}
837
838/* TODO: The compiler is half converted from using HOST_WIDE_INT to
 839 represent addresses to using offset_int to represent addresses.
 840 We use to_short_addr at the interface from new code to old,
 841 unconverted code. */
842template <typename storage>
843inline HOST_WIDE_INTlong
844generic_wide_int <storage>::to_short_addr () const
845{
 /* Raw low block, with no extension or range check. */
 846 return this->get_val ()[0];
847}
848
849/* Return the implicit value of blocks above get_len (). */
850template <typename storage>
851inline HOST_WIDE_INTlong
852generic_wide_int <storage>::sign_mask () const
853{
 854 unsigned int len = this->get_len ();
 855 gcc_assert (len > 0)((void)(!(len > 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.h"
, 855, __FUNCTION__), 0 : 0))
;
 856
 857 unsigned HOST_WIDE_INTlong high = this->get_val ()[len - 1];
 858 if (!is_sign_extended)
 859 {
 /* The top block may carry undefined bits above the precision.
 Shifting left by the excess moves the notional sign bit
 (bit PRECISION - 1) into the MSB of HIGH, so the signed test
 below reads the right bit. */
 860 unsigned int precision = this->get_precision ();
 861 int excess = len * HOST_BITS_PER_WIDE_INT64 - precision;
 862 if (excess > 0)
 863 high <<= excess;
 864 }
 /* -1 for negative values, 0 for non-negative: the value every
 implicit block holds. */
 865 return (HOST_WIDE_INTlong) (high) < 0 ? -1 : 0;
866}
867
868/* Return the signed value of the least-significant explicitly-encoded
 869 block. */
870template <typename storage>
871inline HOST_WIDE_INTlong
872generic_wide_int <storage>::slow () const
873{
 874 return this->get_val ()[0];
875}
876
877/* Return the signed value of the most-significant explicitly-encoded
 878 block. */
879template <typename storage>
880inline HOST_WIDE_INTlong
881generic_wide_int <storage>::shigh () const
882{
 /* Relies on get_len () >= 1, an invariant documented at the top of
 the file ("The precision and length of a wide_int are always
 greater than 0"). */
 883 return this->get_val ()[this->get_len () - 1];
884}
885
886/* Return the unsigned value of the least-significant
 887 explicitly-encoded block. */
888template <typename storage>
889inline unsigned HOST_WIDE_INTlong
890generic_wide_int <storage>::ulow () const
891{
 892 return this->get_val ()[0];
893}
894
895/* Return the unsigned value of the most-significant
 896 explicitly-encoded block. */
897template <typename storage>
898inline unsigned HOST_WIDE_INTlong
899generic_wide_int <storage>::uhigh () const
900{
 901 return this->get_val ()[this->get_len () - 1];
902}
903
904/* Return block I, which might be implicitly or explicitly encoded. */
905template <typename storage>
906inline HOST_WIDE_INTlong
907generic_wide_int <storage>::elt (unsigned int i) const
908{
 /* Blocks at or above get_len () are the implicit sign extension;
 sign_mask () reconstructs their value (-1 or 0). */
 909 if (i >= this->get_len ())
 910 return sign_mask ();
 911 else
 912 return this->get_val ()[i];
913}
914
915/* Like elt, but sign-extend beyond the upper bit, instead of returning
916 the raw encoding. */
917template <typename storage>
918inline HOST_WIDE_INTlong
919generic_wide_int <storage>::sext_elt (unsigned int i) const
920{
921 HOST_WIDE_INTlong elt_i = elt (i);
  /* Sign-extended storage already guarantees the invariant; only
     correct the raw encoding otherwise.  */
922 if (!is_sign_extended)
923 {
924 unsigned int precision = this->get_precision ();
  /* LSB = bit position where block I starts within the full value.  */
925 unsigned int lsb = i * HOST_BITS_PER_WIDE_INT64;
  /* If block I straddles the precision boundary, only its low
     (precision - lsb) bits are meaningful; sign-extend from there.  */
926 if (precision - lsb < HOST_BITS_PER_WIDE_INT64)
927 elt_i = sext_hwi (elt_i, precision - lsb);
928 }
929 return elt_i;
930}
931
932template <typename storage>
933template <typename T>
934inline generic_wide_int <storage> &
935generic_wide_int <storage>::operator = (const T &x)
936{
937 storage::operator = (x);
938 return *this;
939}
940
941/* Dump the contents of the integer to stderr, for debugging. */
942template <typename storage>
943void
944generic_wide_int <storage>::dump () const
945{
946 unsigned int len = this->get_len ();
947 const HOST_WIDE_INTlong *val = this->get_val ();
948 unsigned int precision = this->get_precision ();
949 fprintf (stderrstderr, "[");
950 if (len * HOST_BITS_PER_WIDE_INT64 < precision)
951 fprintf (stderrstderr, "...,");
952 for (unsigned int i = 0; i < len - 1; ++i)
953 fprintf (stderrstderr, HOST_WIDE_INT_PRINT_HEX"%#" "l" "x" ",", val[len - 1 - i]);
954 fprintf (stderrstderr, HOST_WIDE_INT_PRINT_HEX"%#" "l" "x" "], precision = %d\n",
955 val[0], precision);
956}
957
958namespace wi
959{
960 template <typename storage>
961 struct int_traits < generic_wide_int <storage> >
962 : public wi::int_traits <storage>
963 {
964 static unsigned int get_precision (const generic_wide_int <storage> &);
965 static wi::storage_ref decompose (HOST_WIDE_INTlong *, unsigned int,
966 const generic_wide_int <storage> &);
967 };
968}
969
970template <typename storage>
971inline unsigned int
972wi::int_traits < generic_wide_int <storage> >::
973get_precision (const generic_wide_int <storage> &x)
974{
975 return x.get_precision ();
976}
977
978template <typename storage>
979inline wi::storage_ref
980wi::int_traits < generic_wide_int <storage> >::
981decompose (HOST_WIDE_INTlong *, unsigned int precision,
982 const generic_wide_int <storage> &x)
983{
984 gcc_checking_assert (precision == x.get_precision ())((void)(!(precision == x.get_precision ()) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.h"
, 984, __FUNCTION__), 0 : 0))
;
72
'?' condition is false
985 return wi::storage_ref (x.get_val (), x.get_len (), precision);
73
Calling 'fixed_wide_int_storage::get_len'
986}
987
988/* Provide the storage for a wide_int_ref. This acts like a read-only
989 wide_int, with the optimization that VAL is normally a pointer to
990 another integer's storage, so that no array copy is needed. */
991template <bool SE, bool HDP>
992class wide_int_ref_storage : public wi::storage_ref
993{
994private:
995 /* Scratch space that can be used when decomposing the original integer.
996 It must live as long as this object. */
997 HOST_WIDE_INTlong scratch[2];
998
999public:
1000 wide_int_ref_storage () {}
1001
1002 wide_int_ref_storage (const wi::storage_ref &);
1003
1004 template <typename T>
1005 wide_int_ref_storage (const T &);
1006
1007 template <typename T>
1008 wide_int_ref_storage (const T &, unsigned int);
1009};
1010
1011/* Create a reference from an existing reference. */
1012template <bool SE, bool HDP>
1013inline wide_int_ref_storage <SE, HDP>::
1014wide_int_ref_storage (const wi::storage_ref &x)
1015 : storage_ref (x)
1016{}
1017
1018/* Create a reference to integer X in its natural precision. Note
1019 that the natural precision is host-dependent for primitive
1020 types. */
1021template <bool SE, bool HDP>
1022template <typename T>
1023inline wide_int_ref_storage <SE, HDP>::wide_int_ref_storage (const T &x)
1024 : storage_ref (wi::int_traits <T>::decompose (scratch,
1025 wi::get_precision (x), x))
1026{
1027}
1028
1029/* Create a reference to integer X in precision PRECISION. */
1030template <bool SE, bool HDP>
1031template <typename T>
1032inline wide_int_ref_storage <SE, HDP>::
1033wide_int_ref_storage (const T &x, unsigned int precision)
1034 : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
71
Calling 'int_traits::decompose'
1035{
1036}
1037
1038namespace wi
1039{
1040 template <bool SE, bool HDP>
1041 struct int_traits <wide_int_ref_storage <SE, HDP> >
1042 {
1043 static const enum precision_type precision_type = VAR_PRECISION;
1044 static const bool host_dependent_precision = HDP;
1045 static const bool is_sign_extended = SE;
1046 };
1047}
1048
1049namespace wi
1050{
1051 unsigned int force_to_size (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1052 unsigned int, unsigned int, unsigned int,
1053 signop sgn);
1054 unsigned int from_array (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1055 unsigned int, unsigned int, bool = true);
1056}
1057
1058/* The storage used by wide_int. */
1059class GTY(()) wide_int_storage
1060{
1061private:
1062 HOST_WIDE_INTlong val[WIDE_INT_MAX_ELTS(((64*(8)) + 64) / 64)];
1063 unsigned int len;
1064 unsigned int precision;
1065
1066public:
1067 wide_int_storage ();
1068 template <typename T>
1069 wide_int_storage (const T &);
1070
1071 /* The standard generic_wide_int storage methods. */
1072 unsigned int get_precision () const;
1073 const HOST_WIDE_INTlong *get_val () const;
1074 unsigned int get_len () const;
1075 HOST_WIDE_INTlong *write_val ();
1076 void set_len (unsigned int, bool = false);
1077
1078 template <typename T>
1079 wide_int_storage &operator = (const T &);
1080
1081 static wide_int from (const wide_int_ref &, unsigned int, signop);
1082 static wide_int from_array (const HOST_WIDE_INTlong *, unsigned int,
1083 unsigned int, bool = true);
1084 static wide_int create (unsigned int);
1085
1086 /* FIXME: target-dependent, so should disappear. */
1087 wide_int bswap () const;
1088};
1089
1090namespace wi
1091{
1092 template <>
1093 struct int_traits <wide_int_storage>
1094 {
1095 static const enum precision_type precision_type = VAR_PRECISION;
1096 /* Guaranteed by a static assert in the wide_int_storage constructor. */
1097 static const bool host_dependent_precision = false;
1098 static const bool is_sign_extended = true;
1099 template <typename T1, typename T2>
1100 static wide_int get_binary_result (const T1 &, const T2 &);
1101 };
1102}
1103
1104inline wide_int_storage::wide_int_storage () {}
1105
1106/* Initialize the storage from integer X, in its natural precision.
1107 Note that we do not allow integers with host-dependent precision
1108 to become wide_ints; wide_ints must always be logically independent
1109 of the host. */
1110template <typename T>
1111inline wide_int_storage::wide_int_storage (const T &x)
1112{
1113 { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision)static_assert ((!wi::int_traits<T>::host_dependent_precision
), "!wi::int_traits<T>::host_dependent_precision")
; }
1114 { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION)static_assert ((wi::int_traits<T>::precision_type != wi
::CONST_PRECISION), "wi::int_traits<T>::precision_type != wi::CONST_PRECISION"
)
; }
1115 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x);
1116 precision = xi.precision;
1117 wi::copy (*this, xi);
1118}
1119
1120template <typename T>
1121inline wide_int_storage&
1122wide_int_storage::operator = (const T &x)
1123{
1124 { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision)static_assert ((!wi::int_traits<T>::host_dependent_precision
), "!wi::int_traits<T>::host_dependent_precision")
; }
1125 { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION)static_assert ((wi::int_traits<T>::precision_type != wi
::CONST_PRECISION), "wi::int_traits<T>::precision_type != wi::CONST_PRECISION"
)
; }
1126 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x);
1127 precision = xi.precision;
1128 wi::copy (*this, xi);
1129 return *this;
1130}
1131
1132inline unsigned int
1133wide_int_storage::get_precision () const
1134{
1135 return precision;
1136}
1137
1138inline const HOST_WIDE_INTlong *
1139wide_int_storage::get_val () const
1140{
1141 return val;
1142}
1143
/* Return the number of explicitly-encoded blocks (set by set_len ()).  */
1144inline unsigned int
1145wide_int_storage::get_len () const
1146{
1147 return len;
1148}
1149
1150inline HOST_WIDE_INTlong *
1151wide_int_storage::write_val ()
1152{
1153 return val;
1154}
1155
/* Record that L blocks of VAL are in use.  If the caller did not
   already sign-extend the top block (IS_SIGN_EXTENDED false) and the
   encoding covers more bits than the precision, canonicalize the top
   block by sign-extending from the precision boundary.  */
1156inline void
1157wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
1158{
1159 len = l;
1160 if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT64 > precision)
1161 val[len - 1] = sext_hwi (val[len - 1],
1162 precision % HOST_BITS_PER_WIDE_INT64);
1163}
1164
1165/* Treat X as having signedness SGN and convert it to a PRECISION-bit
1166 number. */
1167inline wide_int
1168wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
1169 signop sgn)
1170{
1171 wide_int result = wide_int::create (precision);
1172 result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
1173 x.precision, precision, sgn));
1174 return result;
1175}
1176
1177/* Create a wide_int from the explicit block encoding given by VAL and
1178 LEN. PRECISION is the precision of the integer. NEED_CANON_P is
1179 true if the encoding may have redundant trailing blocks. */
1180inline wide_int
1181wide_int_storage::from_array (const HOST_WIDE_INTlong *val, unsigned int len,
1182 unsigned int precision, bool need_canon_p)
1183{
1184 wide_int result = wide_int::create (precision);
1185 result.set_len (wi::from_array (result.write_val (), val, len, precision,
1186 need_canon_p));
1187 return result;
1188}
1189
1190/* Return an uninitialized wide_int with precision PRECISION. */
1191inline wide_int
1192wide_int_storage::create (unsigned int precision)
1193{
1194 wide_int x;
1195 x.precision = precision;
1196 return x;
1197}
1198
1199template <typename T1, typename T2>
1200inline wide_int
1201wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
1202{
1203 /* This shouldn't be used for two flexible-precision inputs. */
1204 STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISIONstatic_assert ((wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
|| wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION
), "wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION"
)
1205 || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION)static_assert ((wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
|| wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION
), "wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION"
)
;
1206 if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
1207 return wide_int::create (wi::get_precision (y));
1208 else
1209 return wide_int::create (wi::get_precision (x));
1210}
1211
1212/* The storage used by FIXED_WIDE_INT (N). */
1213template <int N>
1214class GTY(()) fixed_wide_int_storage
1215{
1216private:
1217 HOST_WIDE_INTlong val[(N + HOST_BITS_PER_WIDE_INT64 + 1) / HOST_BITS_PER_WIDE_INT64];
  /* Number of blocks of VAL in use.  NOTE(review): only set_len ()
     and the converting constructor write this; the default
     constructor leaves it uninitialized, which is what the static
     analyzer flags when get_len () is reached first (report line
     1283).  Callers must set_len () before any read.  */
1218 unsigned int len;
1219
1220public:
1221 fixed_wide_int_storage ();
1222 template <typename T>
1223 fixed_wide_int_storage (const T &);
1224
1225 /* The standard generic_wide_int storage methods. */
1226 unsigned int get_precision () const;
1227 const HOST_WIDE_INTlong *get_val () const;
1228 unsigned int get_len () const;
1229 HOST_WIDE_INTlong *write_val ();
1230 void set_len (unsigned int, bool = false);
1231
1232 static FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> > from (const wide_int_ref &, signop);
1233 static FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> > from_array (const HOST_WIDE_INTlong *, unsigned int,
1234 bool = true);
1235};
1236
1237namespace wi
1238{
1239 template <int N>
1240 struct int_traits < fixed_wide_int_storage <N> >
1241 {
1242 static const enum precision_type precision_type = CONST_PRECISION;
1243 static const bool host_dependent_precision = false;
1244 static const bool is_sign_extended = true;
1245 static const unsigned int precision = N;
1246 template <typename T1, typename T2>
1247 static FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> > get_binary_result (const T1 &, const T2 &);
1248 };
1249}
1250
1251template <int N>
1252inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}
31
Returning without writing to 'this->len'
51
Returning without writing to 'this->len'
1253
1254/* Initialize the storage from integer X, in precision N. */
1255template <int N>
1256template <typename T>
1257inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
1258{
1259 /* Check for type compatibility. We don't want to initialize a
1260 fixed-width integer from something like a wide_int. */
1261 WI_BINARY_RESULT (T, FIXED_WIDE_INT (N))typename wi::binary_traits <T, generic_wide_int < fixed_wide_int_storage
<N> > >::result_type
*assertion ATTRIBUTE_UNUSED__attribute__ ((__unused__));
1262 wi::copy (*this, WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
(x, N));
1263}
1264
1265template <int N>
1266inline unsigned int
1267fixed_wide_int_storage <N>::get_precision () const
1268{
1269 return N;
1270}
1271
1272template <int N>
1273inline const HOST_WIDE_INTlong *
1274fixed_wide_int_storage <N>::get_val () const
1275{
1276 return val;
1277}
1278
1279template <int N>
1280inline unsigned int
1281fixed_wide_int_storage <N>::get_len () const
1282{
1283 return len;
74
Undefined or garbage value returned to caller
1284}
1285
1286template <int N>
1287inline HOST_WIDE_INTlong *
1288fixed_wide_int_storage <N>::write_val ()
1289{
1290 return val;
1291}
1292
1293template <int N>
1294inline void
1295fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
1296{
1297 len = l;
1298 /* There are no excess bits in val[len - 1]. */
1299 STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0)static_assert ((N % 64 == 0), "N % HOST_BITS_PER_WIDE_INT == 0"
)
;
1300}
1301
1302/* Treat X as having signedness SGN and convert it to an N-bit number. */
1303template <int N>
1304inline FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> >
1305fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
1306{
1307 FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> > result;
1308 result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
1309 x.precision, N, sgn));
1310 return result;
1311}
1312
1313/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
1314 VAL and LEN. NEED_CANON_P is true if the encoding may have redundant
1315 trailing blocks. */
1316template <int N>
1317inline FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> >
1318fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INTlong *val,
1319 unsigned int len,
1320 bool need_canon_p)
1321{
1322 FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> > result;
1323 result.set_len (wi::from_array (result.write_val (), val, len,
1324 N, need_canon_p));
1325 return result;
1326}
1327
1328template <int N>
1329template <typename T1, typename T2>
1330inline FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> >
1331wi::int_traits < fixed_wide_int_storage <N> >::
1332get_binary_result (const T1 &, const T2 &)
1333{
1334 return FIXED_WIDE_INT (N)generic_wide_int < fixed_wide_int_storage <N> > ();
1335}
1336
1337/* A reference to one element of a trailing_wide_ints structure. */
1338class trailing_wide_int_storage
1339{
1340private:
1341 /* The precision of the integer, which is a fixed property of the
1342 parent trailing_wide_ints. */
1343 unsigned int m_precision;
1344
1345 /* A pointer to the length field. */
1346 unsigned char *m_len;
1347
1348 /* A pointer to the HWI array. There are enough elements to hold all
1349 values of precision M_PRECISION. */
1350 HOST_WIDE_INTlong *m_val;
1351
1352public:
1353 trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INTlong *);
1354
1355 /* The standard generic_wide_int storage methods. */
1356 unsigned int get_len () const;
1357 unsigned int get_precision () const;
1358 const HOST_WIDE_INTlong *get_val () const;
1359 HOST_WIDE_INTlong *write_val ();
1360 void set_len (unsigned int, bool = false);
1361
1362 template <typename T>
1363 trailing_wide_int_storage &operator = (const T &);
1364};
1365
1366typedef generic_wide_int <trailing_wide_int_storage> trailing_wide_int;
1367
1368/* trailing_wide_int behaves like a wide_int. */
1369namespace wi
1370{
1371 template <>
1372 struct int_traits <trailing_wide_int_storage>
1373 : public int_traits <wide_int_storage> {};
1374}
1375
1376/* A variable-length array of wide_int-like objects that can be put
1377 at the end of a variable-sized structure. The number of objects is
1378 at most N and can be set at runtime by using set_precision().
1379
1380 Use extra_size to calculate how many bytes beyond the
1381 sizeof need to be allocated. Use set_precision to initialize the
1382 structure. */
1383template <int N>
1384struct GTY((user)) trailing_wide_ints
1385{
1386private:
1387 /* The shared precision of each number. */
1388 unsigned short m_precision;
1389
1390 /* The shared maximum length of each number. */
1391 unsigned char m_max_len;
1392
1393 /* The number of elements. */
1394 unsigned char m_num_elements;
1395
1396 /* The current length of each number.
1397 Avoid char array so the whole structure is not a typeless storage
1398 that will, in turn, turn off TBAA on gimple, trees and RTL. */
1399 struct {unsigned char len;} m_len[N];
1400
1401 /* The variable-length part of the structure, which always contains
1402 at least one HWI. Element I starts at index I * M_MAX_LEN. */
1403 HOST_WIDE_INTlong m_val[1];
1404
1405public:
1406 typedef WIDE_INT_REF_FOR (trailing_wide_int_storage)generic_wide_int <wide_int_ref_storage <wi::int_traits <
trailing_wide_int_storage>::is_sign_extended, wi::int_traits
<trailing_wide_int_storage>::host_dependent_precision>
>
const_reference;
1407
1408 void set_precision (unsigned int precision, unsigned int num_elements = N);
1409 unsigned int get_precision () const { return m_precision; }
1410 unsigned int num_elements () const { return m_num_elements; }
1411 trailing_wide_int operator [] (unsigned int);
1412 const_reference operator [] (unsigned int) const;
1413 static size_t extra_size (unsigned int precision,
1414 unsigned int num_elements = N);
1415 size_t extra_size () const { return extra_size (m_precision,
1416 m_num_elements); }
1417};
1418
1419inline trailing_wide_int_storage::
1420trailing_wide_int_storage (unsigned int precision, unsigned char *len,
1421 HOST_WIDE_INTlong *val)
1422 : m_precision (precision), m_len (len), m_val (val)
1423{
1424}
1425
1426inline unsigned int
1427trailing_wide_int_storage::get_len () const
1428{
1429 return *m_len;
1430}
1431
1432inline unsigned int
1433trailing_wide_int_storage::get_precision () const
1434{
1435 return m_precision;
1436}
1437
1438inline const HOST_WIDE_INTlong *
1439trailing_wide_int_storage::get_val () const
1440{
1441 return m_val;
1442}
1443
1444inline HOST_WIDE_INTlong *
1445trailing_wide_int_storage::write_val ()
1446{
1447 return m_val;
1448}
1449
1450inline void
1451trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended)
1452{
1453 *m_len = len;
1454 if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT64 > m_precision)
1455 m_val[len - 1] = sext_hwi (m_val[len - 1],
1456 m_precision % HOST_BITS_PER_WIDE_INT64);
1457}
1458
1459template <typename T>
1460inline trailing_wide_int_storage &
1461trailing_wide_int_storage::operator = (const T &x)
1462{
1463 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x, m_precision);
1464 wi::copy (*this, xi);
1465 return *this;
1466}
1467
1468/* Initialize the structure and record that all elements have precision
1469 PRECISION. NUM_ELEMENTS can be no more than N. */
1470template <int N>
1471inline void
1472trailing_wide_ints <N>::set_precision (unsigned int precision,
1473 unsigned int num_elements)
1474{
1475 gcc_checking_assert (num_elements <= N)((void)(!(num_elements <= N) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.h"
, 1475, __FUNCTION__), 0 : 0))
;
1476 m_num_elements = num_elements;
1477 m_precision = precision;
1478 m_max_len = ((precision + HOST_BITS_PER_WIDE_INT64 - 1)
1479 / HOST_BITS_PER_WIDE_INT64);
1480}
1481
1482/* Return a reference to element INDEX. */
1483template <int N>
1484inline trailing_wide_int
1485trailing_wide_ints <N>::operator [] (unsigned int index)
1486{
1487 return trailing_wide_int_storage (m_precision, &m_len[index].len,
1488 &m_val[index * m_max_len]);
1489}
1490
1491template <int N>
1492inline typename trailing_wide_ints <N>::const_reference
1493trailing_wide_ints <N>::operator [] (unsigned int index) const
1494{
1495 return wi::storage_ref (&m_val[index * m_max_len],
1496 m_len[index].len, m_precision);
1497}
1498
1499/* Return how many extra bytes need to be added to the end of the
1500 structure in order to handle NUM_ELEMENTS wide_ints of precision
1501 PRECISION. NUM_ELEMENTS is the number of elements, and defaults
1502 to N. */
1503template <int N>
1504inline size_t
1505trailing_wide_ints <N>::extra_size (unsigned int precision,
1506 unsigned int num_elements)
1507{
1508 unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT64 - 1)
1509 / HOST_BITS_PER_WIDE_INT64);
1510 gcc_checking_assert (num_elements <= N)((void)(!(num_elements <= N) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.h"
, 1510, __FUNCTION__), 0 : 0))
;
1511 return (num_elements * max_len - 1) * sizeof (HOST_WIDE_INTlong);
1512}
1513
1514/* This macro is used in structures that end with a trailing_wide_ints field
1515 called FIELD. It declares get_NAME() and set_NAME() methods to access
1516 element I of FIELD. */
1517#define TRAILING_WIDE_INT_ACCESSOR(NAME, FIELD, I)trailing_wide_int get_NAME () { return FIELD[I]; } template <
typename T> void set_NAME (const T &x) { FIELD[I] = x;
}
\
1518 trailing_wide_int get_##NAME () { return FIELD[I]; } \
1519 template <typename T> void set_##NAME (const T &x) { FIELD[I] = x; }
1520
1521namespace wi
1522{
1523 /* Implementation of int_traits for primitive integer types like "int". */
1524 template <typename T, bool signed_p>
1525 struct primitive_int_traits
1526 {
1527 static const enum precision_type precision_type = FLEXIBLE_PRECISION;
1528 static const bool host_dependent_precision = true;
1529 static const bool is_sign_extended = true;
1530 static unsigned int get_precision (T);
1531 static wi::storage_ref decompose (HOST_WIDE_INTlong *, unsigned int, T);
1532 };
1533}
1534
1535template <typename T, bool signed_p>
1536inline unsigned int
1537wi::primitive_int_traits <T, signed_p>::get_precision (T)
1538{
1539 return sizeof (T) * CHAR_BIT8;
1540}
1541
1542template <typename T, bool signed_p>
1543inline wi::storage_ref
1544wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INTlong *scratch,
1545 unsigned int precision, T x)
1546{
1547 scratch[0] = x;
1548 if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT64)
1549 return wi::storage_ref (scratch, 1, precision);
1550 scratch[1] = 0;
1551 return wi::storage_ref (scratch, 2, precision);
1552}
1553
1554/* Allow primitive C types to be used in wi:: routines. */
1555namespace wi
1556{
1557 template <>
1558 struct int_traits <unsigned char>
1559 : public primitive_int_traits <unsigned char, false> {};
1560
1561 template <>
1562 struct int_traits <unsigned short>
1563 : public primitive_int_traits <unsigned short, false> {};
1564
1565 template <>
1566 struct int_traits <int>
1567 : public primitive_int_traits <int, true> {};
1568
1569 template <>
1570 struct int_traits <unsigned int>
1571 : public primitive_int_traits <unsigned int, false> {};
1572
1573 template <>
1574 struct int_traits <long>
1575 : public primitive_int_traits <long, true> {};
1576
1577 template <>
1578 struct int_traits <unsigned long>
1579 : public primitive_int_traits <unsigned long, false> {};
1580
1581#if defined HAVE_LONG_LONG1
1582 template <>
1583 struct int_traits <long long>
1584 : public primitive_int_traits <long long, true> {};
1585
1586 template <>
1587 struct int_traits <unsigned long long>
1588 : public primitive_int_traits <unsigned long long, false> {};
1589#endif
1590}
1591
1592namespace wi
1593{
1594 /* Stores HWI-sized integer VAL, treating it as having signedness SGN
1595 and precision PRECISION. */
1596 class hwi_with_prec
1597 {
1598 public:
1599 hwi_with_prec () {}
1600 hwi_with_prec (HOST_WIDE_INTlong, unsigned int, signop);
1601 HOST_WIDE_INTlong val;
1602 unsigned int precision;
1603 signop sgn;
1604 };
1605
1606 hwi_with_prec shwi (HOST_WIDE_INTlong, unsigned int);
1607 hwi_with_prec uhwi (unsigned HOST_WIDE_INTlong, unsigned int);
1608
1609 hwi_with_prec minus_one (unsigned int);
1610 hwi_with_prec zero (unsigned int);
1611 hwi_with_prec one (unsigned int);
1612 hwi_with_prec two (unsigned int);
1613}
1614
1615inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INTlong v, unsigned int p,
1616 signop s)
1617 : precision (p), sgn (s)
1618{
1619 if (precision < HOST_BITS_PER_WIDE_INT64)
1620 val = sext_hwi (v, precision);
1621 else
1622 val = v;
1623}
1624
1625/* Return a signed integer that has value VAL and precision PRECISION. */
1626inline wi::hwi_with_prec
1627wi::shwi (HOST_WIDE_INTlong val, unsigned int precision)
1628{
1629 return hwi_with_prec (val, precision, SIGNED);
1630}
1631
1632/* Return an unsigned integer that has value VAL and precision PRECISION. */
1633inline wi::hwi_with_prec
1634wi::uhwi (unsigned HOST_WIDE_INTlong val, unsigned int precision)
1635{
1636 return hwi_with_prec (val, precision, UNSIGNED);
1637}
1638
1639/* Return a wide int of -1 with precision PRECISION. */
1640inline wi::hwi_with_prec
1641wi::minus_one (unsigned int precision)
1642{
1643 return wi::shwi (-1, precision);
1644}
1645
1646/* Return a wide int of 0 with precision PRECISION. */
1647inline wi::hwi_with_prec
1648wi::zero (unsigned int precision)
1649{
1650 return wi::shwi (0, precision);
1651}
1652
1653/* Return a wide int of 1 with precision PRECISION. */
1654inline wi::hwi_with_prec
1655wi::one (unsigned int precision)
1656{
1657 return wi::shwi (1, precision);
1658}
1659
1660/* Return a wide int of 2 with precision PRECISION. */
1661inline wi::hwi_with_prec
1662wi::two (unsigned int precision)
1663{
1664 return wi::shwi (2, precision);
1665}
1666
1667namespace wi
1668{
1669 /* ints_for<T>::zero (X) returns a zero that, when asssigned to a T,
1670 gives that T the same precision as X. */
1671 template<typename T, precision_type = int_traits<T>::precision_type>
1672 struct ints_for
1673 {
1674 static int zero (const T &) { return 0; }
1675 };
1676
1677 template<typename T>
1678 struct ints_for<T, VAR_PRECISION>
1679 {
1680 static hwi_with_prec zero (const T &);
1681 };
1682}
1683
1684template<typename T>
1685inline wi::hwi_with_prec
1686wi::ints_for<T, wi::VAR_PRECISION>::zero (const T &x)
1687{
1688 return wi::zero (wi::get_precision (x));
1689}
1690
1691namespace wi
1692{
1693 template <>
1694 struct int_traits <wi::hwi_with_prec>
1695 {
1696 static const enum precision_type precision_type = VAR_PRECISION;
1697 /* hwi_with_prec has an explicitly-given precision, rather than the
1698 precision of HOST_WIDE_INT. */
1699 static const bool host_dependent_precision = false;
1700 static const bool is_sign_extended = true;
1701 static unsigned int get_precision (const wi::hwi_with_prec &);
1702 static wi::storage_ref decompose (HOST_WIDE_INTlong *, unsigned int,
1703 const wi::hwi_with_prec &);
1704 };
1705}
1706
1707inline unsigned int
1708wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
1709{
1710 return x.precision;
1711}
1712
1713inline wi::storage_ref
1714wi::int_traits <wi::hwi_with_prec>::
1715decompose (HOST_WIDE_INTlong *scratch, unsigned int precision,
1716 const wi::hwi_with_prec &x)
1717{
1718 gcc_checking_assert (precision == x.precision)((void)(!(precision == x.precision) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.h"
, 1718, __FUNCTION__), 0 : 0))
;
1719 scratch[0] = x.val;
1720 if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT64)
1721 return wi::storage_ref (scratch, 1, precision);
1722 scratch[1] = 0;
1723 return wi::storage_ref (scratch, 2, precision);
1724}
1725
1726/* Private functions for handling large cases out of line. They take
1727 individual length and array parameters because that is cheaper for
1728 the inline caller than constructing an object on the stack and
1729 passing a reference to it. (Although many callers use wide_int_refs,
1730 we generally want those to be removed by SRA.) */
1731namespace wi
1732{
1733 bool eq_p_large (const HOST_WIDE_INTlong *, unsigned int,
1734 const HOST_WIDE_INTlong *, unsigned int, unsigned int);
1735 bool lts_p_large (const HOST_WIDE_INTlong *, unsigned int, unsigned int,
1736 const HOST_WIDE_INTlong *, unsigned int);
1737 bool ltu_p_large (const HOST_WIDE_INTlong *, unsigned int, unsigned int,
1738 const HOST_WIDE_INTlong *, unsigned int);
1739 int cmps_large (const HOST_WIDE_INTlong *, unsigned int, unsigned int,
1740 const HOST_WIDE_INTlong *, unsigned int);
1741 int cmpu_large (const HOST_WIDE_INTlong *, unsigned int, unsigned int,
1742 const HOST_WIDE_INTlong *, unsigned int);
1743 unsigned int sext_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1744 unsigned int,
1745 unsigned int, unsigned int);
1746 unsigned int zext_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1747 unsigned int,
1748 unsigned int, unsigned int);
1749 unsigned int set_bit_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1750 unsigned int, unsigned int, unsigned int);
1751 unsigned int lshift_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1752 unsigned int, unsigned int, unsigned int);
1753 unsigned int lrshift_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1754 unsigned int, unsigned int, unsigned int,
1755 unsigned int);
1756 unsigned int arshift_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1757 unsigned int, unsigned int, unsigned int,
1758 unsigned int);
1759 unsigned int and_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *, unsigned int,
1760 const HOST_WIDE_INTlong *, unsigned int, unsigned int);
1761 unsigned int and_not_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1762 unsigned int, const HOST_WIDE_INTlong *,
1763 unsigned int, unsigned int);
1764 unsigned int or_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *, unsigned int,
1765 const HOST_WIDE_INTlong *, unsigned int, unsigned int);
1766 unsigned int or_not_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1767 unsigned int, const HOST_WIDE_INTlong *,
1768 unsigned int, unsigned int);
1769 unsigned int xor_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *, unsigned int,
1770 const HOST_WIDE_INTlong *, unsigned int, unsigned int);
1771 unsigned int add_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *, unsigned int,
1772 const HOST_WIDE_INTlong *, unsigned int, unsigned int,
1773 signop, overflow_type *);
1774 unsigned int sub_large (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *, unsigned int,
1775 const HOST_WIDE_INTlong *, unsigned int, unsigned int,
1776 signop, overflow_type *);
1777 unsigned int mul_internal (HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1778 unsigned int, const HOST_WIDE_INTlong *,
1779 unsigned int, unsigned int, signop,
1780 overflow_type *, bool);
1781 unsigned int divmod_internal (HOST_WIDE_INTlong *, unsigned int *,
1782 HOST_WIDE_INTlong *, const HOST_WIDE_INTlong *,
1783 unsigned int, unsigned int,
1784 const HOST_WIDE_INTlong *,
1785 unsigned int, unsigned int,
1786 signop, overflow_type *);
1787}
1788
1789/* Return the number of bits that integer X can hold. */
1790template <typename T>
1791inline unsigned int
1792wi::get_precision (const T &x)
1793{
1794 return wi::int_traits <T>::get_precision (x);
1795}
1796
1797/* Return the number of bits that the result of a binary operation can
1798 hold when the input operands are X and Y. */
1799template <typename T1, typename T2>
1800inline unsigned int
1801wi::get_binary_precision (const T1 &x, const T2 &y)
1802{
1803 return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type>::
1804 get_binary_result (x, y));
1805}
1806
1807/* Copy the contents of Y to X, but keeping X's current precision. */
1808template <typename T1, typename T2>
1809inline void
1810wi::copy (T1 &x, const T2 &y)
1811{
1812 HOST_WIDE_INTlong *xval = x.write_val ();
1813 const HOST_WIDE_INTlong *yval = y.get_val ();
1814 unsigned int len = y.get_len ();
1815 unsigned int i = 0;
1816 do
1817 xval[i] = yval[i];
1818 while (++i < len);
1819 x.set_len (len, y.is_sign_extended);
1820}
1821
1822/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
1823template <typename T>
1824inline bool
1825wi::fits_shwi_p (const T &x)
1826{
1827 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x);
1828 return xi.len == 1;
1829}
1830
1831/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
1832 precision. */
1833template <typename T>
1834inline bool
1835wi::fits_uhwi_p (const T &x)
1836{
1837 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x);
1838 if (xi.precision <= HOST_BITS_PER_WIDE_INT64)
1839 return true;
1840 if (xi.len == 1)
1841 return xi.slow () >= 0;
1842 return xi.len == 2 && xi.uhigh () == 0;
1843}
1844
1845/* Return true if X is negative based on the interpretation of SGN.
1846 For UNSIGNED, this is always false. */
1847template <typename T>
1848inline bool
1849wi::neg_p (const T &x, signop sgn)
1850{
1851 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x);
1852 if (sgn == UNSIGNED)
1853 return false;
1854 return xi.sign_mask () < 0;
1855}
1856
1857/* Return -1 if the top bit of X is set and 0 if the top bit is clear. */
1858template <typename T>
1859inline HOST_WIDE_INTlong
1860wi::sign_mask (const T &x)
1861{
1862 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x);
1863 return xi.sign_mask ();
1864}
1865
1866/* Return true if X == Y. X and Y must be binary-compatible. */
1867template <typename T1, typename T2>
1868inline bool
1869wi::eq_p (const T1 &x, const T2 &y)
1870{
1871 unsigned int precision = get_binary_precision (x, y);
1872 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
1873 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
1874 if (xi.is_sign_extended && yi.is_sign_extended)
1875 {
1876 /* This case reduces to array equality. */
1877 if (xi.len != yi.len)
1878 return false;
1879 unsigned int i = 0;
1880 do
1881 if (xi.val[i] != yi.val[i])
1882 return false;
1883 while (++i != xi.len);
1884 return true;
1885 }
1886 if (LIKELY (yi.len == 1)(__builtin_expect ((yi.len == 1), 1)))
1887 {
1888 /* XI is only equal to YI if it too has a single HWI. */
1889 if (xi.len != 1)
1890 return false;
1891 /* Excess bits in xi.val[0] will be signs or zeros, so comparisons
1892 with 0 are simple. */
1893 if (STATIC_CONSTANT_P (yi.val[0] == 0)(__builtin_constant_p (yi.val[0] == 0) && (yi.val[0] ==
0))
)
1894 return xi.val[0] == 0;
1895 /* Otherwise flush out any excess bits first. */
1896 unsigned HOST_WIDE_INTlong diff = xi.val[0] ^ yi.val[0];
1897 int excess = HOST_BITS_PER_WIDE_INT64 - precision;
1898 if (excess > 0)
1899 diff <<= excess;
1900 return diff == 0;
1901 }
1902 return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
1903}
1904
1905/* Return true if X != Y. X and Y must be binary-compatible. */
1906template <typename T1, typename T2>
1907inline bool
1908wi::ne_p (const T1 &x, const T2 &y)
1909{
1910 return !eq_p (x, y);
1911}
1912
1913/* Return true if X < Y when both are treated as signed values. */
1914template <typename T1, typename T2>
1915inline bool
1916wi::lts_p (const T1 &x, const T2 &y)
1917{
1918 unsigned int precision = get_binary_precision (x, y);
1919 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
69
Calling constructor for 'generic_wide_int<wide_int_ref_storage<true, false>>'
1920 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
1921 /* We optimize x < y, where y is 64 or fewer bits. */
1922 if (wi::fits_shwi_p (yi))
1923 {
1924 /* Make lts_p (x, 0) as efficient as wi::neg_p (x). */
1925 if (STATIC_CONSTANT_P (yi.val[0] == 0)(__builtin_constant_p (yi.val[0] == 0) && (yi.val[0] ==
0))
)
1926 return neg_p (xi);
1927 /* If x fits directly into a shwi, we can compare directly. */
1928 if (wi::fits_shwi_p (xi))
1929 return xi.to_shwi () < yi.to_shwi ();
1930 /* If x doesn't fit and is negative, then it must be more
1931 negative than any value in y, and hence smaller than y. */
1932 if (neg_p (xi))
1933 return true;
1934 /* If x is positive, then it must be larger than any value in y,
1935 and hence greater than y. */
1936 return false;
1937 }
1938 /* Optimize the opposite case, if it can be detected at compile time. */
1939 if (STATIC_CONSTANT_P (xi.len == 1)(__builtin_constant_p (xi.len == 1) && (xi.len == 1)))
1940 /* If YI is negative it is lower than the least HWI.
1941 If YI is positive it is greater than the greatest HWI. */
1942 return !neg_p (yi);
1943 return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
1944}
1945
1946/* Return true if X < Y when both are treated as unsigned values. */
1947template <typename T1, typename T2>
1948inline bool
1949wi::ltu_p (const T1 &x, const T2 &y)
1950{
1951 unsigned int precision = get_binary_precision (x, y);
1952 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
1953 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
1954 /* Optimize comparisons with constants. */
1955 if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0)(__builtin_constant_p (yi.len == 1 && yi.val[0] >=
0) && (yi.len == 1 && yi.val[0] >= 0))
)
1956 return xi.len == 1 && xi.to_uhwi () < (unsigned HOST_WIDE_INTlong) yi.val[0];
1957 if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0)(__builtin_constant_p (xi.len == 1 && xi.val[0] >=
0) && (xi.len == 1 && xi.val[0] >= 0))
)
1958 return yi.len != 1 || yi.to_uhwi () > (unsigned HOST_WIDE_INTlong) xi.val[0];
1959 /* Optimize the case of two HWIs. The HWIs are implicitly sign-extended
1960 for precisions greater than HOST_BITS_WIDE_INT, but sign-extending both
1961 values does not change the result. */
1962 if (LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
1963 {
1964 unsigned HOST_WIDE_INTlong xl = xi.to_uhwi ();
1965 unsigned HOST_WIDE_INTlong yl = yi.to_uhwi ();
1966 return xl < yl;
1967 }
1968 return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
1969}
1970
1971/* Return true if X < Y. Signedness of X and Y is indicated by SGN. */
1972template <typename T1, typename T2>
1973inline bool
1974wi::lt_p (const T1 &x, const T2 &y, signop sgn)
1975{
1976 if (sgn == SIGNED)
1977 return lts_p (x, y);
1978 else
1979 return ltu_p (x, y);
1980}
1981
1982/* Return true if X <= Y when both are treated as signed values. */
1983template <typename T1, typename T2>
1984inline bool
1985wi::les_p (const T1 &x, const T2 &y)
1986{
1987 return !lts_p (y, x);
1988}
1989
1990/* Return true if X <= Y when both are treated as unsigned values. */
1991template <typename T1, typename T2>
1992inline bool
1993wi::leu_p (const T1 &x, const T2 &y)
1994{
1995 return !ltu_p (y, x);
1996}
1997
1998/* Return true if X <= Y. Signedness of X and Y is indicated by SGN. */
1999template <typename T1, typename T2>
2000inline bool
2001wi::le_p (const T1 &x, const T2 &y, signop sgn)
2002{
2003 if (sgn == SIGNED)
2004 return les_p (x, y);
2005 else
2006 return leu_p (x, y);
2007}
2008
2009/* Return true if X > Y when both are treated as signed values. */
2010template <typename T1, typename T2>
2011inline bool
2012wi::gts_p (const T1 &x, const T2 &y)
2013{
2014 return lts_p (y, x);
2015}
2016
2017/* Return true if X > Y when both are treated as unsigned values. */
2018template <typename T1, typename T2>
2019inline bool
2020wi::gtu_p (const T1 &x, const T2 &y)
2021{
2022 return ltu_p (y, x);
2023}
2024
2025/* Return true if X > Y. Signedness of X and Y is indicated by SGN. */
2026template <typename T1, typename T2>
2027inline bool
2028wi::gt_p (const T1 &x, const T2 &y, signop sgn)
2029{
2030 if (sgn == SIGNED)
2031 return gts_p (x, y);
2032 else
2033 return gtu_p (x, y);
2034}
2035
2036/* Return true if X >= Y when both are treated as signed values. */
2037template <typename T1, typename T2>
2038inline bool
2039wi::ges_p (const T1 &x, const T2 &y)
2040{
2041 return !lts_p (x, y);
2042}
2043
2044/* Return true if X >= Y when both are treated as unsigned values. */
2045template <typename T1, typename T2>
2046inline bool
2047wi::geu_p (const T1 &x, const T2 &y)
2048{
2049 return !ltu_p (x, y);
2050}
2051
2052/* Return true if X >= Y. Signedness of X and Y is indicated by SGN. */
2053template <typename T1, typename T2>
2054inline bool
2055wi::ge_p (const T1 &x, const T2 &y, signop sgn)
2056{
2057 if (sgn == SIGNED)
2058 return ges_p (x, y);
2059 else
2060 return geu_p (x, y);
2061}
2062
2063/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
2064 as signed values. */
2065template <typename T1, typename T2>
2066inline int
2067wi::cmps (const T1 &x, const T2 &y)
2068{
2069 unsigned int precision = get_binary_precision (x, y);
2070 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2071 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2072 if (wi::fits_shwi_p (yi))
2073 {
2074 /* Special case for comparisons with 0. */
2075 if (STATIC_CONSTANT_P (yi.val[0] == 0)(__builtin_constant_p (yi.val[0] == 0) && (yi.val[0] ==
0))
)
2076 return neg_p (xi) ? -1 : !(xi.len == 1 && xi.val[0] == 0);
2077 /* If x fits into a signed HWI, we can compare directly. */
2078 if (wi::fits_shwi_p (xi))
2079 {
2080 HOST_WIDE_INTlong xl = xi.to_shwi ();
2081 HOST_WIDE_INTlong yl = yi.to_shwi ();
2082 return xl < yl ? -1 : xl > yl;
2083 }
2084 /* If x doesn't fit and is negative, then it must be more
2085 negative than any signed HWI, and hence smaller than y. */
2086 if (neg_p (xi))
2087 return -1;
2088 /* If x is positive, then it must be larger than any signed HWI,
2089 and hence greater than y. */
2090 return 1;
2091 }
2092 /* Optimize the opposite case, if it can be detected at compile time. */
2093 if (STATIC_CONSTANT_P (xi.len == 1)(__builtin_constant_p (xi.len == 1) && (xi.len == 1)))
2094 /* If YI is negative it is lower than the least HWI.
2095 If YI is positive it is greater than the greatest HWI. */
2096 return neg_p (yi) ? 1 : -1;
2097 return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
2098}
2099
2100/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
2101 as unsigned values. */
2102template <typename T1, typename T2>
2103inline int
2104wi::cmpu (const T1 &x, const T2 &y)
2105{
2106 unsigned int precision = get_binary_precision (x, y);
2107 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2108 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2109 /* Optimize comparisons with constants. */
2110 if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0)(__builtin_constant_p (yi.len == 1 && yi.val[0] >=
0) && (yi.len == 1 && yi.val[0] >= 0))
)
2111 {
2112 /* If XI doesn't fit in a HWI then it must be larger than YI. */
2113 if (xi.len != 1)
2114 return 1;
2115 /* Otherwise compare directly. */
2116 unsigned HOST_WIDE_INTlong xl = xi.to_uhwi ();
2117 unsigned HOST_WIDE_INTlong yl = yi.val[0];
2118 return xl < yl ? -1 : xl > yl;
2119 }
2120 if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0)(__builtin_constant_p (xi.len == 1 && xi.val[0] >=
0) && (xi.len == 1 && xi.val[0] >= 0))
)
2121 {
2122 /* If YI doesn't fit in a HWI then it must be larger than XI. */
2123 if (yi.len != 1)
2124 return -1;
2125 /* Otherwise compare directly. */
2126 unsigned HOST_WIDE_INTlong xl = xi.val[0];
2127 unsigned HOST_WIDE_INTlong yl = yi.to_uhwi ();
2128 return xl < yl ? -1 : xl > yl;
2129 }
2130 /* Optimize the case of two HWIs. The HWIs are implicitly sign-extended
2131 for precisions greater than HOST_BITS_WIDE_INT, but sign-extending both
2132 values does not change the result. */
2133 if (LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
2134 {
2135 unsigned HOST_WIDE_INTlong xl = xi.to_uhwi ();
2136 unsigned HOST_WIDE_INTlong yl = yi.to_uhwi ();
2137 return xl < yl ? -1 : xl > yl;
2138 }
2139 return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
2140}
2141
2142/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of
2143 X and Y indicated by SGN. */
2144template <typename T1, typename T2>
2145inline int
2146wi::cmp (const T1 &x, const T2 &y, signop sgn)
2147{
2148 if (sgn == SIGNED)
2149 return cmps (x, y);
2150 else
2151 return cmpu (x, y);
2152}
2153
2154/* Return ~x. */
2155template <typename T>
2156inline WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
2157wi::bit_not (const T &x)
2158{
2159 WI_UNARY_RESULT_VAR (result, val, T, x)typename wi::binary_traits <T, T>::result_type result =
wi::int_traits <typename wi::binary_traits <T, T>::
result_type>::get_binary_result (x, x); long *val = result
.write_val ()
;
2160 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x, get_precision (result));
2161 for (unsigned int i = 0; i < xi.len; ++i)
2162 val[i] = ~xi.val[i];
2163 result.set_len (xi.len);
2164 return result;
2165}
2166
2167/* Return -x. */
2168template <typename T>
2169inline WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
2170wi::neg (const T &x)
2171{
2172 return sub (0, x);
2173}
2174
2175/* Return -x. Indicate in *OVERFLOW if performing the negation would
2176 cause an overflow. */
2177template <typename T>
2178inline WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
2179wi::neg (const T &x, overflow_type *overflow)
2180{
2181 *overflow = only_sign_bit_p (x) ? OVF_OVERFLOW : OVF_NONE;
2182 return sub (0, x);
2183}
2184
2185/* Return the absolute value of x. */
2186template <typename T>
2187inline WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
2188wi::abs (const T &x)
2189{
2190 return neg_p (x) ? neg (x) : WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type (x);
2191}
2192
2193/* Return the result of sign-extending the low OFFSET bits of X. */
2194template <typename T>
2195inline WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
2196wi::sext (const T &x, unsigned int offset)
2197{
2198 WI_UNARY_RESULT_VAR (result, val, T, x)typename wi::binary_traits <T, T>::result_type result =
wi::int_traits <typename wi::binary_traits <T, T>::
result_type>::get_binary_result (x, x); long *val = result
.write_val ()
;
2199 unsigned int precision = get_precision (result);
2200 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x, precision);
2201
2202 if (offset <= HOST_BITS_PER_WIDE_INT64)
2203 {
2204 val[0] = sext_hwi (xi.ulow (), offset);
2205 result.set_len (1, true);
2206 }
2207 else
2208 result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
2209 return result;
2210}
2211
2212/* Return the result of zero-extending the low OFFSET bits of X. */
2213template <typename T>
2214inline WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
2215wi::zext (const T &x, unsigned int offset)
2216{
2217 WI_UNARY_RESULT_VAR (result, val, T, x)typename wi::binary_traits <T, T>::result_type result =
wi::int_traits <typename wi::binary_traits <T, T>::
result_type>::get_binary_result (x, x); long *val = result
.write_val ()
;
2218 unsigned int precision = get_precision (result);
2219 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x, precision);
2220
2221 /* This is not just an optimization, it is actually required to
2222 maintain canonization. */
2223 if (offset >= precision)
2224 {
2225 wi::copy (result, xi);
2226 return result;
2227 }
2228
2229 /* In these cases we know that at least the top bit will be clear,
2230 so no sign extension is necessary. */
2231 if (offset < HOST_BITS_PER_WIDE_INT64)
2232 {
2233 val[0] = zext_hwi (xi.ulow (), offset);
2234 result.set_len (1, true);
2235 }
2236 else
2237 result.set_len (zext_large (val, xi.val, xi.len, precision, offset), true);
2238 return result;
2239}
2240
2241/* Return the result of extending the low OFFSET bits of X according to
2242 signedness SGN. */
2243template <typename T>
2244inline WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
2245wi::ext (const T &x, unsigned int offset, signop sgn)
2246{
2247 return sgn == SIGNED ? sext (x, offset) : zext (x, offset);
2248}
2249
2250/* Return an integer that represents X | (1 << bit). */
2251template <typename T>
2252inline WI_UNARY_RESULT (T)typename wi::binary_traits <T, T>::result_type
2253wi::set_bit (const T &x, unsigned int bit)
2254{
2255 WI_UNARY_RESULT_VAR (result, val, T, x)typename wi::binary_traits <T, T>::result_type result =
wi::int_traits <typename wi::binary_traits <T, T>::
result_type>::get_binary_result (x, x); long *val = result
.write_val ()
;
2256 unsigned int precision = get_precision (result);
2257 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x, precision);
2258 if (precision <= HOST_BITS_PER_WIDE_INT64)
2259 {
2260 val[0] = xi.ulow () | (HOST_WIDE_INT_1U1UL << bit);
2261 result.set_len (1);
2262 }
2263 else
2264 result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
2265 return result;
2266}
2267
2268/* Return the mininum of X and Y, treating them both as having
2269 signedness SGN. */
2270template <typename T1, typename T2>
2271inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2272wi::min (const T1 &x, const T2 &y, signop sgn)
2273{
2274 WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val __attribute__
((__unused__)) = result.write_val ()
;
2275 unsigned int precision = get_precision (result);
2276 if (wi::le_p (x, y, sgn))
2277 wi::copy (result, WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
(x, precision));
2278 else
2279 wi::copy (result, WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
(y, precision));
2280 return result;
2281}
2282
2283/* Return the minimum of X and Y, treating both as signed values. */
2284template <typename T1, typename T2>
2285inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2286wi::smin (const T1 &x, const T2 &y)
2287{
2288 return wi::min (x, y, SIGNED);
2289}
2290
2291/* Return the minimum of X and Y, treating both as unsigned values. */
2292template <typename T1, typename T2>
2293inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2294wi::umin (const T1 &x, const T2 &y)
2295{
2296 return wi::min (x, y, UNSIGNED);
2297}
2298
2299/* Return the maxinum of X and Y, treating them both as having
2300 signedness SGN. */
2301template <typename T1, typename T2>
2302inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2303wi::max (const T1 &x, const T2 &y, signop sgn)
2304{
2305 WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val __attribute__
((__unused__)) = result.write_val ()
;
2306 unsigned int precision = get_precision (result);
2307 if (wi::ge_p (x, y, sgn))
2308 wi::copy (result, WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
(x, precision));
2309 else
2310 wi::copy (result, WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
(y, precision));
2311 return result;
2312}
2313
2314/* Return the maximum of X and Y, treating both as signed values. */
2315template <typename T1, typename T2>
2316inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2317wi::smax (const T1 &x, const T2 &y)
2318{
2319 return wi::max (x, y, SIGNED);
2320}
2321
2322/* Return the maximum of X and Y, treating both as unsigned values. */
2323template <typename T1, typename T2>
2324inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2325wi::umax (const T1 &x, const T2 &y)
2326{
2327 return wi::max (x, y, UNSIGNED);
2328}
2329
2330/* Return X & Y. */
2331template <typename T1, typename T2>
2332inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2333wi::bit_and (const T1 &x, const T2 &y)
2334{
2335 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2336 unsigned int precision = get_precision (result);
2337 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2338 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2339 bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2340 if (LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
2341 {
2342 val[0] = xi.ulow () & yi.ulow ();
2343 result.set_len (1, is_sign_extended);
2344 }
2345 else
2346 result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
2347 precision), is_sign_extended);
2348 return result;
2349}
2350
2351/* Return X & ~Y. */
2352template <typename T1, typename T2>
2353inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2354wi::bit_and_not (const T1 &x, const T2 &y)
2355{
2356 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2357 unsigned int precision = get_precision (result);
2358 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2359 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2360 bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2361 if (LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
2362 {
2363 val[0] = xi.ulow () & ~yi.ulow ();
2364 result.set_len (1, is_sign_extended);
2365 }
2366 else
2367 result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
2368 precision), is_sign_extended);
2369 return result;
2370}
2371
2372/* Return X | Y. */
2373template <typename T1, typename T2>
2374inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2375wi::bit_or (const T1 &x, const T2 &y)
2376{
2377 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2378 unsigned int precision = get_precision (result);
2379 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2380 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2381 bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2382 if (LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
2383 {
2384 val[0] = xi.ulow () | yi.ulow ();
2385 result.set_len (1, is_sign_extended);
2386 }
2387 else
2388 result.set_len (or_large (val, xi.val, xi.len,
2389 yi.val, yi.len, precision), is_sign_extended);
2390 return result;
2391}
2392
2393/* Return X | ~Y. */
2394template <typename T1, typename T2>
2395inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2396wi::bit_or_not (const T1 &x, const T2 &y)
2397{
2398 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2399 unsigned int precision = get_precision (result);
2400 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2401 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2402 bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2403 if (LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
2404 {
2405 val[0] = xi.ulow () | ~yi.ulow ();
2406 result.set_len (1, is_sign_extended);
2407 }
2408 else
2409 result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
2410 precision), is_sign_extended);
2411 return result;
2412}
2413
2414/* Return X ^ Y. */
2415template <typename T1, typename T2>
2416inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2417wi::bit_xor (const T1 &x, const T2 &y)
2418{
2419 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2420 unsigned int precision = get_precision (result);
2421 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2422 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2423 bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2424 if (LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
2425 {
2426 val[0] = xi.ulow () ^ yi.ulow ();
2427 result.set_len (1, is_sign_extended);
2428 }
2429 else
2430 result.set_len (xor_large (val, xi.val, xi.len,
2431 yi.val, yi.len, precision), is_sign_extended);
2432 return result;
2433}
2434
2435/* Return X + Y. */
2436template <typename T1, typename T2>
2437inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2438wi::add (const T1 &x, const T2 &y)
2439{
2440 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2441 unsigned int precision = get_precision (result);
2442 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2443 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2444 if (precision <= HOST_BITS_PER_WIDE_INT64)
2445 {
2446 val[0] = xi.ulow () + yi.ulow ();
2447 result.set_len (1);
2448 }
2449 /* If the precision is known at compile time to be greater than
2450 HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
2451 knowing that (a) all bits in those HWIs are significant and
2452 (b) the result has room for at least two HWIs. This provides
2453 a fast path for things like offset_int and widest_int.
2454
2455 The STATIC_CONSTANT_P test prevents this path from being
2456 used for wide_ints. wide_ints with precisions greater than
2457 HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
2458 point handling them inline. */
2459 else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)(__builtin_constant_p (precision > 64) && (precision
> 64))
2460 && LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
2461 {
2462 unsigned HOST_WIDE_INTlong xl = xi.ulow ();
2463 unsigned HOST_WIDE_INTlong yl = yi.ulow ();
2464 unsigned HOST_WIDE_INTlong resultl = xl + yl;
2465 val[0] = resultl;
2466 val[1] = (HOST_WIDE_INTlong) resultl < 0 ? 0 : -1;
2467 result.set_len (1 + (((resultl ^ xl) & (resultl ^ yl))
2468 >> (HOST_BITS_PER_WIDE_INT64 - 1)));
2469 }
2470 else
2471 result.set_len (add_large (val, xi.val, xi.len,
2472 yi.val, yi.len, precision,
2473 UNSIGNED, 0));
2474 return result;
2475}
2476
2477/* Return X + Y. Treat X and Y as having the signednes given by SGN
2478 and indicate in *OVERFLOW whether the operation overflowed. */
2479template <typename T1, typename T2>
2480inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2481wi::add (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2482{
2483 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2484 unsigned int precision = get_precision (result);
2485 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2486 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2487 if (precision <= HOST_BITS_PER_WIDE_INT64)
2488 {
2489 unsigned HOST_WIDE_INTlong xl = xi.ulow ();
2490 unsigned HOST_WIDE_INTlong yl = yi.ulow ();
2491 unsigned HOST_WIDE_INTlong resultl = xl + yl;
2492 if (sgn == SIGNED)
2493 {
2494 if ((((resultl ^ xl) & (resultl ^ yl))
2495 >> (precision - 1)) & 1)
2496 {
2497 if (xl > resultl)
2498 *overflow = OVF_UNDERFLOW;
2499 else if (xl < resultl)
2500 *overflow = OVF_OVERFLOW;
2501 else
2502 *overflow = OVF_NONE;
2503 }
2504 else
2505 *overflow = OVF_NONE;
2506 }
2507 else
2508 *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT64 - precision))
2509 < (xl << (HOST_BITS_PER_WIDE_INT64 - precision)))
2510 ? OVF_OVERFLOW : OVF_NONE;
2511 val[0] = resultl;
2512 result.set_len (1);
2513 }
2514 else
2515 result.set_len (add_large (val, xi.val, xi.len,
2516 yi.val, yi.len, precision,
2517 sgn, overflow));
2518 return result;
2519}
2520
2521/* Return X - Y. */
2522template <typename T1, typename T2>
2523inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2524wi::sub (const T1 &x, const T2 &y)
2525{
2526 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2527 unsigned int precision = get_precision (result);
2528 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2529 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2530 if (precision <= HOST_BITS_PER_WIDE_INT64)
2531 {
2532 val[0] = xi.ulow () - yi.ulow ();
2533 result.set_len (1);
2534 }
2535 /* If the precision is known at compile time to be greater than
2536 HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
2537 knowing that (a) all bits in those HWIs are significant and
2538 (b) the result has room for at least two HWIs. This provides
2539 a fast path for things like offset_int and widest_int.
2540
2541 The STATIC_CONSTANT_P test prevents this path from being
2542 used for wide_ints. wide_ints with precisions greater than
2543 HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
2544 point handling them inline. */
2545 else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)(__builtin_constant_p (precision > 64) && (precision
> 64))
2546 && LIKELY (xi.len + yi.len == 2)(__builtin_expect ((xi.len + yi.len == 2), 1)))
2547 {
2548 unsigned HOST_WIDE_INTlong xl = xi.ulow ();
2549 unsigned HOST_WIDE_INTlong yl = yi.ulow ();
2550 unsigned HOST_WIDE_INTlong resultl = xl - yl;
2551 val[0] = resultl;
2552 val[1] = (HOST_WIDE_INTlong) resultl < 0 ? 0 : -1;
2553 result.set_len (1 + (((resultl ^ xl) & (xl ^ yl))
2554 >> (HOST_BITS_PER_WIDE_INT64 - 1)));
2555 }
2556 else
2557 result.set_len (sub_large (val, xi.val, xi.len,
2558 yi.val, yi.len, precision,
2559 UNSIGNED, 0));
2560 return result;
2561}
2562
2563/* Return X - Y. Treat X and Y as having the signednes given by SGN
2564 and indicate in *OVERFLOW whether the operation overflowed. */
2565template <typename T1, typename T2>
2566inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2567wi::sub (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2568{
2569 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2570 unsigned int precision = get_precision (result);
2571 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2572 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2573 if (precision <= HOST_BITS_PER_WIDE_INT64)
2574 {
2575 unsigned HOST_WIDE_INTlong xl = xi.ulow ();
2576 unsigned HOST_WIDE_INTlong yl = yi.ulow ();
2577 unsigned HOST_WIDE_INTlong resultl = xl - yl;
2578 if (sgn == SIGNED)
2579 {
2580 if ((((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1)
2581 {
2582 if (xl > yl)
2583 *overflow = OVF_UNDERFLOW;
2584 else if (xl < yl)
2585 *overflow = OVF_OVERFLOW;
2586 else
2587 *overflow = OVF_NONE;
2588 }
2589 else
2590 *overflow = OVF_NONE;
2591 }
2592 else
2593 *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT64 - precision))
2594 > (xl << (HOST_BITS_PER_WIDE_INT64 - precision)))
2595 ? OVF_UNDERFLOW : OVF_NONE;
2596 val[0] = resultl;
2597 result.set_len (1);
2598 }
2599 else
2600 result.set_len (sub_large (val, xi.val, xi.len,
2601 yi.val, yi.len, precision,
2602 sgn, overflow));
2603 return result;
2604}
2605
2606/* Return X * Y. */
2607template <typename T1, typename T2>
2608inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2609wi::mul (const T1 &x, const T2 &y)
2610{
2611 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2612 unsigned int precision = get_precision (result);
2613 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2614 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2615 if (precision <= HOST_BITS_PER_WIDE_INT64)
2616 {
2617 val[0] = xi.ulow () * yi.ulow ();
2618 result.set_len (1);
2619 }
2620 else
2621 result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len,
2622 precision, UNSIGNED, 0, false));
2623 return result;
2624}
2625
2626/* Return X * Y. Treat X and Y as having the signednes given by SGN
2627 and indicate in *OVERFLOW whether the operation overflowed. */
2628template <typename T1, typename T2>
2629inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2630wi::mul (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2631{
2632 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2633 unsigned int precision = get_precision (result);
2634 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2635 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2636 result.set_len (mul_internal (val, xi.val, xi.len,
2637 yi.val, yi.len, precision,
2638 sgn, overflow, false));
2639 return result;
2640}
2641
2642/* Return X * Y, treating both X and Y as signed values. Indicate in
2643 *OVERFLOW whether the operation overflowed. */
2644template <typename T1, typename T2>
2645inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2646wi::smul (const T1 &x, const T2 &y, overflow_type *overflow)
2647{
2648 return mul (x, y, SIGNED, overflow);
2649}
2650
2651/* Return X * Y, treating both X and Y as unsigned values. Indicate in
2652 *OVERFLOW if the result overflows. */
2653template <typename T1, typename T2>
2654inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2655wi::umul (const T1 &x, const T2 &y, overflow_type *overflow)
2656{
2657 return mul (x, y, UNSIGNED, overflow);
2658}
2659
2660/* Perform a widening multiplication of X and Y, extending the values
2661 according to SGN, and return the high part of the result. */
2662template <typename T1, typename T2>
2663inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2664wi::mul_high (const T1 &x, const T2 &y, signop sgn)
2665{
2666 WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *val = result
.write_val ()
;
2667 unsigned int precision = get_precision (result);
2668 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2669 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y, precision);
2670 result.set_len (mul_internal (val, xi.val, xi.len,
2671 yi.val, yi.len, precision,
2672 sgn, 0, true));
2673 return result;
2674}
2675
2676/* Return X / Y, rouding towards 0. Treat X and Y as having the
2677 signedness given by SGN. Indicate in *OVERFLOW if the result
2678 overflows. */
2679template <typename T1, typename T2>
2680inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2681wi::div_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2682{
2683 WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type quotient
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *quotient_val
= quotient.write_val ()
;
2684 unsigned int precision = get_precision (quotient);
2685 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2686 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2687
2688 quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
2689 precision,
2690 yi.val, yi.len, yi.precision,
2691 sgn, overflow));
2692 return quotient;
2693}
2694
2695/* Return X / Y, rouding towards 0. Treat X and Y as signed values. */
2696template <typename T1, typename T2>
2697inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2698wi::sdiv_trunc (const T1 &x, const T2 &y)
2699{
2700 return div_trunc (x, y, SIGNED);
2701}
2702
2703/* Return X / Y, rouding towards 0. Treat X and Y as unsigned values. */
2704template <typename T1, typename T2>
2705inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2706wi::udiv_trunc (const T1 &x, const T2 &y)
2707{
2708 return div_trunc (x, y, UNSIGNED);
2709}
2710
2711/* Return X / Y, rouding towards -inf. Treat X and Y as having the
2712 signedness given by SGN. Indicate in *OVERFLOW if the result
2713 overflows. */
2714template <typename T1, typename T2>
2715inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2716wi::div_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2717{
2718 WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type quotient
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *quotient_val
= quotient.write_val ()
;
2719 WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type remainder
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *remainder_val
= remainder.write_val ()
;
2720 unsigned int precision = get_precision (quotient);
2721 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2722 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2723
2724 unsigned int remainder_len;
2725 quotient.set_len (divmod_internal (quotient_val,
2726 &remainder_len, remainder_val,
2727 xi.val, xi.len, precision,
2728 yi.val, yi.len, yi.precision, sgn,
2729 overflow));
2730 remainder.set_len (remainder_len);
2731 if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
2732 return quotient - 1;
2733 return quotient;
2734}
2735
2736/* Return X / Y, rouding towards -inf. Treat X and Y as signed values. */
2737template <typename T1, typename T2>
2738inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2739wi::sdiv_floor (const T1 &x, const T2 &y)
2740{
2741 return div_floor (x, y, SIGNED);
2742}
2743
2744/* Return X / Y, rouding towards -inf. Treat X and Y as unsigned values. */
2745/* ??? Why do we have both this and udiv_trunc. Aren't they the same? */
2746template <typename T1, typename T2>
2747inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2748wi::udiv_floor (const T1 &x, const T2 &y)
2749{
2750 return div_floor (x, y, UNSIGNED);
2751}
2752
2753/* Return X / Y, rouding towards +inf. Treat X and Y as having the
2754 signedness given by SGN. Indicate in *OVERFLOW if the result
2755 overflows. */
2756template <typename T1, typename T2>
2757inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2758wi::div_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2759{
2760 WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type quotient
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *quotient_val
= quotient.write_val ()
;
2761 WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type remainder
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *remainder_val
= remainder.write_val ()
;
2762 unsigned int precision = get_precision (quotient);
2763 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2764 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2765
2766 unsigned int remainder_len;
2767 quotient.set_len (divmod_internal (quotient_val,
2768 &remainder_len, remainder_val,
2769 xi.val, xi.len, precision,
2770 yi.val, yi.len, yi.precision, sgn,
2771 overflow));
2772 remainder.set_len (remainder_len);
2773 if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
2774 return quotient + 1;
2775 return quotient;
2776}
2777
2778/* Return X / Y, rouding towards +inf. Treat X and Y as unsigned values. */
2779template <typename T1, typename T2>
2780inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2781wi::udiv_ceil (const T1 &x, const T2 &y)
2782{
2783 return div_ceil (x, y, UNSIGNED);
2784}
2785
2786/* Return X / Y, rouding towards nearest with ties away from zero.
2787 Treat X and Y as having the signedness given by SGN. Indicate
2788 in *OVERFLOW if the result overflows. */
2789template <typename T1, typename T2>
2790inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2791wi::div_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2792{
2793 WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type quotient
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *quotient_val
= quotient.write_val ()
;
2794 WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type remainder
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *remainder_val
= remainder.write_val ()
;
2795 unsigned int precision = get_precision (quotient);
2796 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2797 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2798
2799 unsigned int remainder_len;
2800 quotient.set_len (divmod_internal (quotient_val,
2801 &remainder_len, remainder_val,
2802 xi.val, xi.len, precision,
2803 yi.val, yi.len, yi.precision, sgn,
2804 overflow));
2805 remainder.set_len (remainder_len);
2806
2807 if (remainder != 0)
2808 {
2809 if (sgn == SIGNED)
2810 {
2811 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type abs_remainder = wi::abs (remainder);
2812 if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
2813 {
2814 if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
2815 return quotient - 1;
2816 else
2817 return quotient + 1;
2818 }
2819 }
2820 else
2821 {
2822 if (wi::geu_p (remainder, wi::sub (y, remainder)))
2823 return quotient + 1;
2824 }
2825 }
2826 return quotient;
2827}
2828
2829/* Return X / Y, rouding towards 0. Treat X and Y as having the
2830 signedness given by SGN. Store the remainder in *REMAINDER_PTR. */
2831template <typename T1, typename T2>
2832inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2833wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
2834 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type *remainder_ptr)
2835{
2836 WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type quotient
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *quotient_val
= quotient.write_val ()
;
2837 WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type remainder
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *remainder_val
= remainder.write_val ()
;
2838 unsigned int precision = get_precision (quotient);
2839 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2840 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2841
2842 unsigned int remainder_len;
2843 quotient.set_len (divmod_internal (quotient_val,
2844 &remainder_len, remainder_val,
2845 xi.val, xi.len, precision,
2846 yi.val, yi.len, yi.precision, sgn, 0));
2847 remainder.set_len (remainder_len);
2848
2849 *remainder_ptr = remainder;
2850 return quotient;
2851}
2852
2853/* Compute the greatest common divisor of two numbers A and B using
2854 Euclid's algorithm. */
2855template <typename T1, typename T2>
2856inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2857wi::gcd (const T1 &a, const T2 &b, signop sgn)
2858{
2859 T1 x, y, z;
2860
2861 x = wi::abs (a);
2862 y = wi::abs (b);
2863
2864 while (gt_p (x, 0, sgn))
2865 {
2866 z = mod_trunc (y, x, sgn);
2867 y = x;
2868 x = z;
2869 }
2870
2871 return y;
2872}
2873
2874/* Compute X / Y, rouding towards 0, and return the remainder.
2875 Treat X and Y as having the signedness given by SGN. Indicate
2876 in *OVERFLOW if the division overflows. */
2877template <typename T1, typename T2>
2878inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2879wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2880{
2881 WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type remainder
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *remainder_val
= remainder.write_val ()
;
2882 unsigned int precision = get_precision (remainder);
2883 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2884 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2885
2886 unsigned int remainder_len;
2887 divmod_internal (0, &remainder_len, remainder_val,
2888 xi.val, xi.len, precision,
2889 yi.val, yi.len, yi.precision, sgn, overflow);
2890 remainder.set_len (remainder_len);
2891
2892 return remainder;
2893}
2894
2895/* Compute X / Y, rouding towards 0, and return the remainder.
2896 Treat X and Y as signed values. */
2897template <typename T1, typename T2>
2898inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2899wi::smod_trunc (const T1 &x, const T2 &y)
2900{
2901 return mod_trunc (x, y, SIGNED);
2902}
2903
2904/* Compute X / Y, rouding towards 0, and return the remainder.
2905 Treat X and Y as unsigned values. */
2906template <typename T1, typename T2>
2907inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2908wi::umod_trunc (const T1 &x, const T2 &y)
2909{
2910 return mod_trunc (x, y, UNSIGNED);
2911}
2912
2913/* Compute X / Y, rouding towards -inf, and return the remainder.
2914 Treat X and Y as having the signedness given by SGN. Indicate
2915 in *OVERFLOW if the division overflows. */
2916template <typename T1, typename T2>
2917inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2918wi::mod_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2919{
2920 WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type quotient
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *quotient_val
= quotient.write_val ()
;
2921 WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type remainder
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *remainder_val
= remainder.write_val ()
;
2922 unsigned int precision = get_precision (quotient);
2923 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2924 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2925
2926 unsigned int remainder_len;
2927 quotient.set_len (divmod_internal (quotient_val,
2928 &remainder_len, remainder_val,
2929 xi.val, xi.len, precision,
2930 yi.val, yi.len, yi.precision, sgn,
2931 overflow));
2932 remainder.set_len (remainder_len);
2933
2934 if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
2935 return remainder + y;
2936 return remainder;
2937}
2938
2939/* Compute X / Y, rouding towards -inf, and return the remainder.
2940 Treat X and Y as unsigned values. */
2941/* ??? Why do we have both this and umod_trunc. Aren't they the same? */
2942template <typename T1, typename T2>
2943inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2944wi::umod_floor (const T1 &x, const T2 &y)
2945{
2946 return mod_floor (x, y, UNSIGNED);
2947}
2948
2949/* Compute X / Y, rouding towards +inf, and return the remainder.
2950 Treat X and Y as having the signedness given by SGN. Indicate
2951 in *OVERFLOW if the division overflows. */
2952template <typename T1, typename T2>
2953inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2954wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2955{
2956 WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type quotient
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *quotient_val
= quotient.write_val ()
;
2957 WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type remainder
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *remainder_val
= remainder.write_val ()
;
2958 unsigned int precision = get_precision (quotient);
2959 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2960 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2961
2962 unsigned int remainder_len;
2963 quotient.set_len (divmod_internal (quotient_val,
2964 &remainder_len, remainder_val,
2965 xi.val, xi.len, precision,
2966 yi.val, yi.len, yi.precision, sgn,
2967 overflow));
2968 remainder.set_len (remainder_len);
2969
2970 if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
2971 return remainder - y;
2972 return remainder;
2973}
2974
2975/* Compute X / Y, rouding towards nearest with ties away from zero,
2976 and return the remainder. Treat X and Y as having the signedness
2977 given by SGN. Indicate in *OVERFLOW if the division overflows. */
2978template <typename T1, typename T2>
2979inline WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type
2980wi::mod_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
2981{
2982 WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type quotient
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *quotient_val
= quotient.write_val ()
;
2983 WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y)typename wi::binary_traits <T1, T2>::result_type remainder
= wi::int_traits <typename wi::binary_traits <T1, T2>
::result_type>::get_binary_result (x, y); long *remainder_val
= remainder.write_val ()
;
2984 unsigned int precision = get_precision (quotient);
2985 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
2986 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
2987
2988 unsigned int remainder_len;
2989 quotient.set_len (divmod_internal (quotient_val,
2990 &remainder_len, remainder_val,
2991 xi.val, xi.len, precision,
2992 yi.val, yi.len, yi.precision, sgn,
2993 overflow));
2994 remainder.set_len (remainder_len);
2995
2996 if (remainder != 0)
2997 {
2998 if (sgn == SIGNED)
2999 {
3000 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type abs_remainder = wi::abs (remainder);
3001 if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
3002 {
3003 if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
3004 return remainder + y;
3005 else
3006 return remainder - y;
3007 }
3008 }
3009 else
3010 {
3011 if (wi::geu_p (remainder, wi::sub (y, remainder)))
3012 return remainder - y;
3013 }
3014 }
3015 return remainder;
3016}
3017
3018/* Return true if X is a multiple of Y. Treat X and Y as having the
3019 signedness given by SGN. */
3020template <typename T1, typename T2>
3021inline bool
3022wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn)
3023{
3024 return wi::mod_trunc (x, y, sgn) == 0;
3025}
3026
3027/* Return true if X is a multiple of Y, storing X / Y in *RES if so.
3028 Treat X and Y as having the signedness given by SGN. */
3029template <typename T1, typename T2>
3030inline bool
3031wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
3032 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type *res)
3033{
3034 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type remainder;
3035 WI_BINARY_RESULT (T1, T2)typename wi::binary_traits <T1, T2>::result_type quotient
3036 = divmod_trunc (x, y, sgn, &remainder);
3037 if (remainder == 0)
3038 {
3039 *res = quotient;
3040 return true;
3041 }
3042 return false;
3043}
3044
3045/* Return X << Y. Return 0 if Y is greater than or equal to
3046 the precision of X. */
3047template <typename T1, typename T2>
3048inline WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type
3049wi::lshift (const T1 &x, const T2 &y)
3050{
3051 WI_UNARY_RESULT_VAR (result, val, T1, x)typename wi::binary_traits <T1, T1>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T1>
::result_type>::get_binary_result (x, x); long *val = result
.write_val ()
;
3052 unsigned int precision = get_precision (result);
3053 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x, precision);
3054 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
3055 /* Handle the simple cases quickly. */
3056 if (geu_p (yi, precision))
3057 {
3058 val[0] = 0;
3059 result.set_len (1);
3060 }
3061 else
3062 {
3063 unsigned int shift = yi.to_uhwi ();
3064 /* For fixed-precision integers like offset_int and widest_int,
3065 handle the case where the shift value is constant and the
3066 result is a single nonnegative HWI (meaning that we don't
3067 need to worry about val[1]). This is particularly common
3068 for converting a byte count to a bit count.
3069
3070 For variable-precision integers like wide_int, handle HWI
3071 and sub-HWI integers inline. */
3072 if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)(__builtin_constant_p (xi.precision > 64) && (xi.precision
> 64))
3073 ? (STATIC_CONSTANT_P (shift < HOST_BITS_PER_WIDE_INT - 1)(__builtin_constant_p (shift < 64 - 1) && (shift <
64 - 1))
3074 && xi.len == 1
3075 && IN_RANGE (xi.val[0], 0, HOST_WIDE_INT_MAX >> shift)((unsigned long) (xi.val[0]) - (unsigned long) (0) <= (unsigned
long) ((~((long) (1UL << (64 - 1)))) >> shift) -
(unsigned long) (0))
)
3076 : precision <= HOST_BITS_PER_WIDE_INT64)
3077 {
3078 val[0] = xi.ulow () << shift;
3079 result.set_len (1);
3080 }
3081 else
3082 result.set_len (lshift_large (val, xi.val, xi.len,
3083 precision, shift));
3084 }
3085 return result;
3086}
3087
3088/* Return X >> Y, using a logical shift. Return 0 if Y is greater than
3089 or equal to the precision of X. */
3090template <typename T1, typename T2>
3091inline WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type
3092wi::lrshift (const T1 &x, const T2 &y)
3093{
3094 WI_UNARY_RESULT_VAR (result, val, T1, x)typename wi::binary_traits <T1, T1>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T1>
::result_type>::get_binary_result (x, x); long *val = result
.write_val ()
;
3095 /* Do things in the precision of the input rather than the output,
3096 since the result can be no larger than that. */
3097 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x);
3098 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
3099 /* Handle the simple cases quickly. */
3100 if (geu_p (yi, xi.precision))
3101 {
/* A logical shift by the full precision or more leaves no bits set,
   so the result is the single zero block.  */
3102 val[0] = 0;
3103 result.set_len (1);
3104 }
3105 else
3106 {
3107 unsigned int shift = yi.to_uhwi ();
3108 /* For fixed-precision integers like offset_int and widest_int,
3109 handle the case where the shift value is constant and the
3110 shifted value is a single nonnegative HWI (meaning that all
3111 bits above the HWI are zero). This is particularly common
3112 for converting a bit count to a byte count.
3113
3114 For variable-precision integers like wide_int, handle HWI
3115 and sub-HWI integers inline. */
3116 if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)(__builtin_constant_p (xi.precision > 64) && (xi.precision
> 64))
3117 ? (shift < HOST_BITS_PER_WIDE_INT64
3118 && xi.len == 1
3119 && xi.val[0] >= 0)
3120 : xi.precision <= HOST_BITS_PER_WIDE_INT64)
3121 {
/* Fast path: the whole value fits in one HWI, so a host shift of
   the zero-extended low word is the complete answer.  */
3122 val[0] = xi.to_uhwi () >> shift;
3123 result.set_len (1);
3124 }
3125 else
/* General multi-HWI case: the out-of-line helper fills VAL and
   returns the number of significant blocks for set_len.  */
3126 result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
3127 get_precision (result), shift));
3128 }
3129 return result;
3130}
3131
3132/* Return X >> Y, using an arithmetic shift. Return a sign mask if
3133 Y is greater than or equal to the precision of X. */
3134template <typename T1, typename T2>
3135inline WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type
3136wi::arshift (const T1 &x, const T2 &y)
3137{
3138 WI_UNARY_RESULT_VAR (result, val, T1, x)typename wi::binary_traits <T1, T1>::result_type result
= wi::int_traits <typename wi::binary_traits <T1, T1>
::result_type>::get_binary_result (x, x); long *val = result
.write_val ()
;
3139 /* Do things in the precision of the input rather than the output,
3140 since the result can be no larger than that. */
3141 WIDE_INT_REF_FOR (T1)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T1>::is_sign_extended, wi::int_traits <T1>::host_dependent_precision
> >
xi (x);
3142 WIDE_INT_REF_FOR (T2)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T2>::is_sign_extended, wi::int_traits <T2>::host_dependent_precision
> >
yi (y);
3143 /* Handle the simple cases quickly. */
3144 if (geu_p (yi, xi.precision))
3145 {
/* Shifting arithmetically by the full precision or more replicates
   the sign bit everywhere, so the result collapses to a sign mask.  */
3146 val[0] = sign_mask (x);
3147 result.set_len (1);
3148 }
3149 else
3150 {
3151 unsigned int shift = yi.to_uhwi ();
3152 if (xi.precision <= HOST_BITS_PER_WIDE_INT64)
3153 {
/* Single-HWI case: shift the low word, then sign-extend from the
   XI.PRECISION - SHIFT bits that remain significant.
   NOTE(review): the TRUE argument to set_len appears to record that
   the stored value is already sign-extended -- confirm against the
   set_len declaration.  */
3154 val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
3155 result.set_len (1, true);
3156 }
3157 else
/* Multi-HWI case: the out-of-line helper fills VAL and returns the
   number of significant blocks for set_len.  */
3158 result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
3159 get_precision (result), shift));
3160 }
3161 return result;
3162}
3163
3164/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a
3165 logical shift otherwise. */
3166template <typename T1, typename T2>
3167inline WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type
3168wi::rshift (const T1 &x, const T2 &y, signop sgn)
3169{
3170 if (sgn == UNSIGNED)
3171 return lrshift (x, y);
3172 else
3173 return arshift (x, y);
3174}
3175
3176/* Return the result of rotating the low WIDTH bits of X left by Y
3177 bits and zero-extending the result. Use a full-width rotate if
3178 WIDTH is zero. */
3179template <typename T1, typename T2>
3180WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type
3181wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
3182{
3183 unsigned int precision = get_binary_precision (x, x);
3184 if (width == 0)
3185 width = precision;
3186 WI_UNARY_RESULT (T2)typename wi::binary_traits <T2, T2>::result_type ymod = umod_trunc (y, width);
3187 WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type left = wi::lshift (x, ymod);
3188 WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type right = wi::lrshift (x, wi::sub (width, ymod));
3189 if (width != precision)
3190 return wi::zext (left, width) | wi::zext (right, width);
3191 return left | right;
3192}
3193
3194/* Return the result of rotating the low WIDTH bits of X right by Y
3195 bits and zero-extending the result. Use a full-width rotate if
3196 WIDTH is zero. */
3197template <typename T1, typename T2>
3198WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type
3199wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
3200{
3201 unsigned int precision = get_binary_precision (x, x);
3202 if (width == 0)
3203 width = precision;
3204 WI_UNARY_RESULT (T2)typename wi::binary_traits <T2, T2>::result_type ymod = umod_trunc (y, width);
3205 WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type right = wi::lrshift (x, ymod);
3206 WI_UNARY_RESULT (T1)typename wi::binary_traits <T1, T1>::result_type left = wi::lshift (x, wi::sub (width, ymod));
3207 if (width != precision)
3208 return wi::zext (left, width) | wi::zext (right, width);
3209 return left | right;
3210}
3211
3212/* Return 0 if the number of 1s in X is even and 1 if the number of 1s
3213 is odd. */
3214inline int
3215wi::parity (const wide_int_ref &x)
3216{
3217 return popcount (x) & 1;
3218}
3219
3220/* Extract WIDTH bits from X, starting at BITPOS. */
3221template <typename T>
3222inline unsigned HOST_WIDE_INTlong
3223wi::extract_uhwi (const T &x, unsigned int bitpos, unsigned int width)
3224{
3225 unsigned precision = get_precision (x);
3226 if (precision < bitpos + width)
3227 precision = bitpos + width;
3228 WIDE_INT_REF_FOR (T)generic_wide_int <wide_int_ref_storage <wi::int_traits <
T>::is_sign_extended, wi::int_traits <T>::host_dependent_precision
> >
xi (x, precision);
3229
3230 /* Handle this rare case after the above, so that we assert about
3231 bogus BITPOS values. */
3232 if (width == 0)
3233 return 0;
3234
3235 unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT64;