File: | build/gcc/wide-int.cc |
Warning: | line 161, column 16 Value stored to 'words' during its initialization is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* Operations with very long integers. |
2 | Copyright (C) 2012-2023 Free Software Foundation, Inc. |
3 | Contributed by Kenneth Zadeck <zadeck@naturalbridge.com> |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify it |
8 | under the terms of the GNU General Public License as published by the |
9 | Free Software Foundation; either version 3, or (at your option) any |
10 | later version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT |
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | #include "config.h" |
22 | #include "system.h" |
23 | #include "coretypes.h" |
24 | #include "tm.h" |
25 | #include "tree.h" |
26 | #include "selftest.h" |
27 | |
28 | |
29 | #define HOST_BITS_PER_HALF_WIDE_INT32 32 |
30 | #if HOST_BITS_PER_HALF_WIDE_INT32 == HOST_BITS_PER_LONG(8 * 8) |
31 | # define HOST_HALF_WIDE_INTint long |
32 | #elif HOST_BITS_PER_HALF_WIDE_INT32 == HOST_BITS_PER_INT(8 * 4) |
33 | # define HOST_HALF_WIDE_INTint int |
34 | #else |
35 | #error Please add support for HOST_HALF_WIDE_INTint |
36 | #endif |
37 | |
38 | #define W_TYPE_SIZE64 HOST_BITS_PER_WIDE_INT64 |
39 | /* Do not include longlong.h when compiler is clang-based. See PR61146. */ |
40 | #if GCC_VERSION(4 * 1000 + 2) >= 3000 && (W_TYPE_SIZE64 == 32 || defined (__SIZEOF_INT128__16)) && !defined(__clang__1) |
41 | typedef unsigned HOST_HALF_WIDE_INTint UHWtype; |
42 | typedef unsigned HOST_WIDE_INTlong UWtype; |
43 | typedef unsigned int UQItype __attribute__ ((mode (QI))); |
44 | typedef unsigned int USItype __attribute__ ((mode (SI))); |
45 | typedef unsigned int UDItype __attribute__ ((mode (DI))); |
46 | #if W_TYPE_SIZE64 == 32 |
47 | typedef unsigned int UDWtype __attribute__ ((mode (DI))); |
48 | #else |
49 | typedef unsigned int UDWtype __attribute__ ((mode (TI))); |
50 | #endif |
51 | #include "longlong.h" |
52 | #endif |
53 | |
54 | static const HOST_WIDE_INTlong zeros[WIDE_INT_MAX_ELTS(((64*(8)) + 64) / 64)] = {}; |
55 | |
56 | /* |
57 | * Internal utilities. |
58 | */ |
59 | |
60 | /* Quantities to deal with values that hold half of a wide int. Used |
61 | in multiply and divide. */ |
62 | #define HALF_INT_MASK((1L << 32) - 1) ((HOST_WIDE_INT_11L << HOST_BITS_PER_HALF_WIDE_INT32) - 1) |
63 | |
64 | #define BLOCK_OF(TARGET)((TARGET) / 64) ((TARGET) / HOST_BITS_PER_WIDE_INT64) |
65 | #define BLOCKS_NEEDED(PREC)(PREC ? (((PREC) + 64 - 1) / 64) : 1) \ |
66 | (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT64 - 1) / HOST_BITS_PER_WIDE_INT64) : 1) |
67 | #define SIGN_MASK(X)((long) (X) < 0 ? -1 : 0) ((HOST_WIDE_INTlong) (X) < 0 ? -1 : 0) |
68 | |
69 | /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1 |
70 | based on the top existing bit of VAL. */ |
71 | |
72 | static unsigned HOST_WIDE_INTlong |
73 | safe_uhwi (const HOST_WIDE_INTlong *val, unsigned int len, unsigned int i) |
74 | { |
75 | return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1-1L : 0; |
76 | } |
77 | |
78 | /* Convert the integer in VAL to canonical form, returning its new length. |
79 | LEN is the number of blocks currently in VAL and PRECISION is the number |
80 | of bits in the integer it represents. |
81 | |
82 | This function only changes the representation, not the value. */ |
83 | static unsigned int |
84 | canonize (HOST_WIDE_INTlong *val, unsigned int len, unsigned int precision) |
85 | { |
86 | unsigned int blocks_needed = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
87 | HOST_WIDE_INTlong top; |
88 | int i; |
89 | |
90 | if (len > blocks_needed) |
91 | len = blocks_needed; |
92 | |
93 | if (len == 1) |
94 | return len; |
95 | |
96 | top = val[len - 1]; |
97 | if (len * HOST_BITS_PER_WIDE_INT64 > precision) |
98 | val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT64); |
99 | if (top != 0 && top != (HOST_WIDE_INTlong)-1) |
100 | return len; |
101 | |
102 | /* At this point we know that the top is either 0 or -1. Find the |
103 | first block that is not a copy of this. */ |
104 | for (i = len - 2; i >= 0; i--) |
105 | { |
106 | HOST_WIDE_INTlong x = val[i]; |
107 | if (x != top) |
108 | { |
109 | if (SIGN_MASK (x)((long) (x) < 0 ? -1 : 0) == top) |
110 | return i + 1; |
111 | |
112 | /* We need an extra block because the top bit block i does |
113 | not match the extension. */ |
114 | return i + 2; |
115 | } |
116 | } |
117 | |
118 | /* The number is 0 or -1. */ |
119 | return 1; |
120 | } |
121 | |
122 | /* VAL[0] is the unsigned result of an operation. Canonize it by adding |
123 | another 0 block if needed, and return number of blocks needed. */ |
124 | |
125 | static inline unsigned int |
126 | canonize_uhwi (HOST_WIDE_INTlong *val, unsigned int precision) |
127 | { |
128 | if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT64) |
129 | { |
130 | val[1] = 0; |
131 | return 2; |
132 | } |
133 | return 1; |
134 | } |
135 | |
136 | /* |
137 | * Conversion routines in and out of wide_int. |
138 | */ |
139 | |
140 | /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the |
141 | result for an integer with precision PRECISION. Return the length |
142 | of VAL (after any canonization. */ |
143 | unsigned int |
144 | wi::from_array (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
145 | unsigned int xlen, unsigned int precision, bool need_canon) |
146 | { |
147 | for (unsigned i = 0; i < xlen; i++) |
148 | val[i] = xval[i]; |
149 | return need_canon ? canonize (val, xlen, precision) : xlen; |
150 | } |
151 | |
152 | /* Construct a wide int from a buffer of length LEN. BUFFER will be |
153 | read according to byte endianness and word endianness of the target. |
154 | Only the lower BUFFER_LEN bytes of the result are set; the remaining |
155 | high bytes are cleared. */ |
156 | wide_int |
157 | wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len) |
158 | { |
159 | unsigned int precision = buffer_len * BITS_PER_UNIT(8); |
160 | wide_int result = wide_int::create (precision); |
161 | unsigned int words = buffer_len / UNITS_PER_WORD(((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) ? 8 : 4); |
Value stored to 'words' during its initialization is never read | |
162 | |
163 | /* We have to clear all the bits ourself, as we merely or in values |
164 | below. */ |
165 | unsigned int len = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
166 | HOST_WIDE_INTlong *val = result.write_val (); |
167 | for (unsigned int i = 0; i < len; ++i) |
168 | val[i] = 0; |
169 | |
170 | for (unsigned int byte = 0; byte < buffer_len; byte++) |
171 | { |
172 | unsigned int offset; |
173 | unsigned int index; |
174 | unsigned int bitpos = byte * BITS_PER_UNIT(8); |
175 | unsigned HOST_WIDE_INTlong value; |
176 | |
177 | if (buffer_len > UNITS_PER_WORD(((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) ? 8 : 4)) |
178 | { |
179 | unsigned int word = byte / UNITS_PER_WORD(((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) ? 8 : 4); |
180 | |
181 | if (WORDS_BIG_ENDIAN0) |
182 | word = (words - 1) - word; |
183 | |
184 | offset = word * UNITS_PER_WORD(((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) ? 8 : 4); |
185 | |
186 | if (BYTES_BIG_ENDIAN0) |
187 | offset += (UNITS_PER_WORD(((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) ? 8 : 4) - 1) - (byte % UNITS_PER_WORD(((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) ? 8 : 4)); |
188 | else |
189 | offset += byte % UNITS_PER_WORD(((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) ? 8 : 4); |
190 | } |
191 | else |
192 | offset = BYTES_BIG_ENDIAN0 ? (buffer_len - 1) - byte : byte; |
193 | |
194 | value = (unsigned HOST_WIDE_INTlong) buffer[offset]; |
195 | |
196 | index = bitpos / HOST_BITS_PER_WIDE_INT64; |
197 | val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT64); |
198 | } |
199 | |
200 | result.set_len (canonize (val, len, precision)); |
201 | |
202 | return result; |
203 | } |
204 | |
205 | /* Sets RESULT from X, the sign is taken according to SGN. */ |
206 | void |
207 | wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn) |
208 | { |
209 | int len = x.get_len (); |
210 | const HOST_WIDE_INTlong *v = x.get_val (); |
211 | int excess = len * HOST_BITS_PER_WIDE_INT64 - x.get_precision (); |
212 | |
213 | if (wi::neg_p (x, sgn)) |
214 | { |
215 | /* We use ones complement to avoid -x80..0 edge case that - |
216 | won't work on. */ |
217 | HOST_WIDE_INTlong *t = XALLOCAVEC (HOST_WIDE_INT, len)((long *) __builtin_alloca(sizeof (long) * (len))); |
218 | for (int i = 0; i < len; i++) |
219 | t[i] = ~v[i]; |
220 | if (excess > 0) |
221 | t[len - 1] = (unsigned HOST_WIDE_INTlong) t[len - 1] << excess >> excess; |
222 | mpz_import__gmpz_import (result, len, -1, sizeof (HOST_WIDE_INTlong), 0, 0, t); |
223 | mpz_com__gmpz_com (result, result); |
224 | } |
225 | else if (excess > 0) |
226 | { |
227 | HOST_WIDE_INTlong *t = XALLOCAVEC (HOST_WIDE_INT, len)((long *) __builtin_alloca(sizeof (long) * (len))); |
228 | for (int i = 0; i < len - 1; i++) |
229 | t[i] = v[i]; |
230 | t[len - 1] = (unsigned HOST_WIDE_INTlong) v[len - 1] << excess >> excess; |
231 | mpz_import__gmpz_import (result, len, -1, sizeof (HOST_WIDE_INTlong), 0, 0, t); |
232 | } |
233 | else if (excess < 0 && wi::neg_p (x)) |
234 | { |
235 | int extra |
236 | = (-excess + HOST_BITS_PER_WIDE_INT64 - 1) / HOST_BITS_PER_WIDE_INT64; |
237 | HOST_WIDE_INTlong *t = XALLOCAVEC (HOST_WIDE_INT, len + extra)((long *) __builtin_alloca(sizeof (long) * (len + extra))); |
238 | for (int i = 0; i < len; i++) |
239 | t[i] = v[i]; |
240 | for (int i = 0; i < extra; i++) |
241 | t[len + i] = -1; |
242 | excess = (-excess) % HOST_BITS_PER_WIDE_INT64; |
243 | if (excess) |
244 | t[len + extra - 1] = (HOST_WIDE_INT_1U1UL << excess) - 1; |
245 | mpz_import__gmpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INTlong), 0, 0, t); |
246 | } |
247 | else |
248 | mpz_import__gmpz_import (result, len, -1, sizeof (HOST_WIDE_INTlong), 0, 0, v); |
249 | } |
250 | |
251 | /* Returns X converted to TYPE. If WRAP is true, then out-of-range |
252 | values of VAL will be wrapped; otherwise, they will be set to the |
253 | appropriate minimum or maximum TYPE bound. */ |
254 | wide_int |
255 | wi::from_mpz (const_tree type, mpz_t x, bool wrap) |
256 | { |
257 | size_t count, numb; |
258 | unsigned int prec = TYPE_PRECISION (type)((tree_class_check ((type), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 258, __FUNCTION__))->type_common.precision); |
259 | wide_int res = wide_int::create (prec); |
260 | |
261 | if (!wrap) |
262 | { |
263 | mpz_t min, max; |
264 | |
265 | mpz_init__gmpz_init (min); |
266 | mpz_init__gmpz_init (max); |
267 | get_type_static_bounds (type, min, max); |
268 | |
269 | if (mpz_cmp__gmpz_cmp (x, min) < 0) |
270 | mpz_set__gmpz_set (x, min); |
271 | else if (mpz_cmp__gmpz_cmp (x, max) > 0) |
272 | mpz_set__gmpz_set (x, max); |
273 | |
274 | mpz_clear__gmpz_clear (min); |
275 | mpz_clear__gmpz_clear (max); |
276 | } |
277 | |
278 | /* Determine the number of unsigned HOST_WIDE_INTs that are required |
279 | for representing the absolute value. The code to calculate count is |
280 | extracted from the GMP manual, section "Integer Import and Export": |
281 | http://gmplib.org/manual/Integer-Import-and-Export.html */ |
282 | numb = CHAR_BIT8 * sizeof (HOST_WIDE_INTlong); |
283 | count = (mpz_sizeinbase__gmpz_sizeinbase (x, 2) + numb - 1) / numb; |
284 | HOST_WIDE_INTlong *val = res.write_val (); |
285 | /* Read the absolute value. |
286 | |
287 | Write directly to the wide_int storage if possible, otherwise leave |
288 | GMP to allocate the memory for us. It might be slightly more efficient |
289 | to use mpz_tdiv_r_2exp for the latter case, but the situation is |
290 | pathological and it seems safer to operate on the original mpz value |
291 | in all cases. */ |
292 | void *valres = mpz_export__gmpz_export (count <= WIDE_INT_MAX_ELTS(((64*(8)) + 64) / 64) ? val : 0, |
293 | &count, -1, sizeof (HOST_WIDE_INTlong), 0, 0, x); |
294 | if (count < 1) |
295 | { |
296 | val[0] = 0; |
297 | count = 1; |
298 | } |
299 | count = MIN (count, BLOCKS_NEEDED (prec))((count) < ((prec ? (((prec) + 64 - 1) / 64) : 1)) ? (count ) : ((prec ? (((prec) + 64 - 1) / 64) : 1))); |
300 | if (valres != val) |
301 | { |
302 | memcpy (val, valres, count * sizeof (HOST_WIDE_INTlong)); |
303 | free (valres); |
304 | } |
305 | /* Zero-extend the absolute value to PREC bits. */ |
306 | if (count < BLOCKS_NEEDED (prec)(prec ? (((prec) + 64 - 1) / 64) : 1) && val[count - 1] < 0) |
307 | val[count++] = 0; |
308 | else |
309 | count = canonize (val, count, prec); |
310 | res.set_len (count); |
311 | |
312 | if (mpz_sgn (x)((x)->_mp_size < 0 ? -1 : (x)->_mp_size > 0) < 0) |
313 | res = -res; |
314 | |
315 | return res; |
316 | } |
317 | |
318 | /* |
319 | * Largest and smallest values in a mode. |
320 | */ |
321 | |
322 | /* Return the largest SGNed number that is representable in PRECISION bits. |
323 | |
324 | TODO: There is still code from the double_int era that trys to |
325 | make up for the fact that double int's could not represent the |
326 | min and max values of all types. This code should be removed |
327 | because the min and max values can always be represented in |
328 | wide_ints and int-csts. */ |
329 | wide_int |
330 | wi::max_value (unsigned int precision, signop sgn) |
331 | { |
332 | gcc_checking_assert (precision != 0)((void)(!(precision != 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 332, __FUNCTION__), 0 : 0)); |
333 | if (sgn == UNSIGNED) |
334 | /* The unsigned max is just all ones. */ |
335 | return shwi (-1, precision); |
336 | else |
337 | /* The signed max is all ones except the top bit. This must be |
338 | explicitly represented. */ |
339 | return mask (precision - 1, false, precision); |
340 | } |
341 | |
342 | /* Return the largest SGNed number that is representable in PRECISION bits. */ |
343 | wide_int |
344 | wi::min_value (unsigned int precision, signop sgn) |
345 | { |
346 | gcc_checking_assert (precision != 0)((void)(!(precision != 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 346, __FUNCTION__), 0 : 0)); |
347 | if (sgn == UNSIGNED) |
348 | return uhwi (0, precision); |
349 | else |
350 | /* The signed min is all zeros except the top bit. This must be |
351 | explicitly represented. */ |
352 | return wi::set_bit_in_zero (precision - 1, precision); |
353 | } |
354 | |
355 | /* |
356 | * Public utilities. |
357 | */ |
358 | |
359 | /* Convert the number represented by XVAL, XLEN and XPRECISION, which has |
360 | signedness SGN, to an integer that has PRECISION bits. Store the blocks |
361 | in VAL and return the number of blocks used. |
362 | |
363 | This function can handle both extension (PRECISION > XPRECISION) |
364 | and truncation (PRECISION < XPRECISION). */ |
365 | unsigned int |
366 | wi::force_to_size (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
367 | unsigned int xlen, unsigned int xprecision, |
368 | unsigned int precision, signop sgn) |
369 | { |
370 | unsigned int blocks_needed = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
371 | unsigned int len = blocks_needed < xlen ? blocks_needed : xlen; |
372 | for (unsigned i = 0; i < len; i++) |
373 | val[i] = xval[i]; |
374 | |
375 | if (precision > xprecision) |
376 | { |
377 | unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT64; |
378 | |
379 | /* Expanding. */ |
380 | if (sgn == UNSIGNED) |
381 | { |
382 | if (small_xprecision && len == BLOCKS_NEEDED (xprecision)(xprecision ? (((xprecision) + 64 - 1) / 64) : 1)) |
383 | val[len - 1] = zext_hwi (val[len - 1], small_xprecision); |
384 | else if (val[len - 1] < 0) |
385 | { |
386 | while (len < BLOCKS_NEEDED (xprecision)(xprecision ? (((xprecision) + 64 - 1) / 64) : 1)) |
387 | val[len++] = -1; |
388 | if (small_xprecision) |
389 | val[len - 1] = zext_hwi (val[len - 1], small_xprecision); |
390 | else |
391 | val[len++] = 0; |
392 | } |
393 | } |
394 | else |
395 | { |
396 | if (small_xprecision && len == BLOCKS_NEEDED (xprecision)(xprecision ? (((xprecision) + 64 - 1) / 64) : 1)) |
397 | val[len - 1] = sext_hwi (val[len - 1], small_xprecision); |
398 | } |
399 | } |
400 | len = canonize (val, len, precision); |
401 | |
402 | return len; |
403 | } |
404 | |
405 | /* This function hides the fact that we cannot rely on the bits beyond |
406 | the precision. This issue comes up in the relational comparisions |
407 | where we do allow comparisons of values of different precisions. */ |
408 | static inline HOST_WIDE_INTlong |
409 | selt (const HOST_WIDE_INTlong *a, unsigned int len, |
410 | unsigned int blocks_needed, unsigned int small_prec, |
411 | unsigned int index, signop sgn) |
412 | { |
413 | HOST_WIDE_INTlong val; |
414 | if (index < len) |
415 | val = a[index]; |
416 | else if (index < blocks_needed || sgn == SIGNED) |
417 | /* Signed or within the precision. */ |
418 | val = SIGN_MASK (a[len - 1])((long) (a[len - 1]) < 0 ? -1 : 0); |
419 | else |
420 | /* Unsigned extension beyond the precision. */ |
421 | val = 0; |
422 | |
423 | if (small_prec && index == blocks_needed - 1) |
424 | return (sgn == SIGNED |
425 | ? sext_hwi (val, small_prec) |
426 | : zext_hwi (val, small_prec)); |
427 | else |
428 | return val; |
429 | } |
430 | |
431 | /* Find the highest bit represented in a wide int. This will in |
432 | general have the same value as the sign bit. */ |
433 | static inline HOST_WIDE_INTlong |
434 | top_bit_of (const HOST_WIDE_INTlong *a, unsigned int len, unsigned int prec) |
435 | { |
436 | int excess = len * HOST_BITS_PER_WIDE_INT64 - prec; |
437 | unsigned HOST_WIDE_INTlong val = a[len - 1]; |
438 | if (excess > 0) |
439 | val <<= excess; |
440 | return val >> (HOST_BITS_PER_WIDE_INT64 - 1); |
441 | } |
442 | |
443 | /* |
444 | * Comparisons, note that only equality is an operator. The other |
445 | * comparisons cannot be operators since they are inherently signed or |
446 | * unsigned and C++ has no such operators. |
447 | */ |
448 | |
449 | /* Return true if OP0 == OP1. */ |
450 | bool |
451 | wi::eq_p_large (const HOST_WIDE_INTlong *op0, unsigned int op0len, |
452 | const HOST_WIDE_INTlong *op1, unsigned int op1len, |
453 | unsigned int prec) |
454 | { |
455 | int l0 = op0len - 1; |
456 | unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT64 - 1); |
457 | |
458 | if (op0len != op1len) |
459 | return false; |
460 | |
461 | if (op0len == BLOCKS_NEEDED (prec)(prec ? (((prec) + 64 - 1) / 64) : 1) && small_prec) |
462 | { |
463 | /* It does not matter if we zext or sext here, we just have to |
464 | do both the same way. */ |
465 | if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec)) |
466 | return false; |
467 | l0--; |
468 | } |
469 | |
470 | while (l0 >= 0) |
471 | if (op0[l0] != op1[l0]) |
472 | return false; |
473 | else |
474 | l0--; |
475 | |
476 | return true; |
477 | } |
478 | |
479 | /* Return true if OP0 < OP1 using signed comparisons. */ |
480 | bool |
481 | wi::lts_p_large (const HOST_WIDE_INTlong *op0, unsigned int op0len, |
482 | unsigned int precision, |
483 | const HOST_WIDE_INTlong *op1, unsigned int op1len) |
484 | { |
485 | HOST_WIDE_INTlong s0, s1; |
486 | unsigned HOST_WIDE_INTlong u0, u1; |
487 | unsigned int blocks_needed = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
488 | unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT64 - 1); |
489 | int l = MAX (op0len - 1, op1len - 1)((op0len - 1) > (op1len - 1) ? (op0len - 1) : (op1len - 1) ); |
490 | |
491 | /* Only the top block is compared as signed. The rest are unsigned |
492 | comparisons. */ |
493 | s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED); |
494 | s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED); |
495 | if (s0 < s1) |
496 | return true; |
497 | if (s0 > s1) |
498 | return false; |
499 | |
500 | l--; |
501 | while (l >= 0) |
502 | { |
503 | u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED); |
504 | u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED); |
505 | |
506 | if (u0 < u1) |
507 | return true; |
508 | if (u0 > u1) |
509 | return false; |
510 | l--; |
511 | } |
512 | |
513 | return false; |
514 | } |
515 | |
516 | /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using |
517 | signed compares. */ |
518 | int |
519 | wi::cmps_large (const HOST_WIDE_INTlong *op0, unsigned int op0len, |
520 | unsigned int precision, |
521 | const HOST_WIDE_INTlong *op1, unsigned int op1len) |
522 | { |
523 | HOST_WIDE_INTlong s0, s1; |
524 | unsigned HOST_WIDE_INTlong u0, u1; |
525 | unsigned int blocks_needed = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
526 | unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT64 - 1); |
527 | int l = MAX (op0len - 1, op1len - 1)((op0len - 1) > (op1len - 1) ? (op0len - 1) : (op1len - 1) ); |
528 | |
529 | /* Only the top block is compared as signed. The rest are unsigned |
530 | comparisons. */ |
531 | s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED); |
532 | s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED); |
533 | if (s0 < s1) |
534 | return -1; |
535 | if (s0 > s1) |
536 | return 1; |
537 | |
538 | l--; |
539 | while (l >= 0) |
540 | { |
541 | u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED); |
542 | u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED); |
543 | |
544 | if (u0 < u1) |
545 | return -1; |
546 | if (u0 > u1) |
547 | return 1; |
548 | l--; |
549 | } |
550 | |
551 | return 0; |
552 | } |
553 | |
554 | /* Return true if OP0 < OP1 using unsigned comparisons. */ |
555 | bool |
556 | wi::ltu_p_large (const HOST_WIDE_INTlong *op0, unsigned int op0len, |
557 | unsigned int precision, |
558 | const HOST_WIDE_INTlong *op1, unsigned int op1len) |
559 | { |
560 | unsigned HOST_WIDE_INTlong x0; |
561 | unsigned HOST_WIDE_INTlong x1; |
562 | unsigned int blocks_needed = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
563 | unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT64 - 1); |
564 | int l = MAX (op0len - 1, op1len - 1)((op0len - 1) > (op1len - 1) ? (op0len - 1) : (op1len - 1) ); |
565 | |
566 | while (l >= 0) |
567 | { |
568 | x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED); |
569 | x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED); |
570 | if (x0 < x1) |
571 | return true; |
572 | if (x0 > x1) |
573 | return false; |
574 | l--; |
575 | } |
576 | |
577 | return false; |
578 | } |
579 | |
580 | /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using |
581 | unsigned compares. */ |
582 | int |
583 | wi::cmpu_large (const HOST_WIDE_INTlong *op0, unsigned int op0len, |
584 | unsigned int precision, |
585 | const HOST_WIDE_INTlong *op1, unsigned int op1len) |
586 | { |
587 | unsigned HOST_WIDE_INTlong x0; |
588 | unsigned HOST_WIDE_INTlong x1; |
589 | unsigned int blocks_needed = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
590 | unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT64 - 1); |
591 | int l = MAX (op0len - 1, op1len - 1)((op0len - 1) > (op1len - 1) ? (op0len - 1) : (op1len - 1) ); |
592 | |
593 | while (l >= 0) |
594 | { |
595 | x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED); |
596 | x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED); |
597 | if (x0 < x1) |
598 | return -1; |
599 | if (x0 > x1) |
600 | return 1; |
601 | l--; |
602 | } |
603 | |
604 | return 0; |
605 | } |
606 | |
607 | /* |
608 | * Extension. |
609 | */ |
610 | |
611 | /* Sign-extend the number represented by XVAL and XLEN into VAL, |
612 | starting at OFFSET. Return the number of blocks in VAL. Both XVAL |
613 | and VAL have PRECISION bits. */ |
614 | unsigned int |
615 | wi::sext_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
616 | unsigned int xlen, unsigned int precision, unsigned int offset) |
617 | { |
618 | unsigned int len = offset / HOST_BITS_PER_WIDE_INT64; |
619 | /* Extending beyond the precision is a no-op. If we have only stored |
620 | OFFSET bits or fewer, the rest are already signs. */ |
621 | if (offset >= precision || len >= xlen) |
622 | { |
623 | for (unsigned i = 0; i < xlen; ++i) |
624 | val[i] = xval[i]; |
625 | return xlen; |
626 | } |
627 | unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT64; |
628 | for (unsigned int i = 0; i < len; i++) |
629 | val[i] = xval[i]; |
630 | if (suboffset > 0) |
631 | { |
632 | val[len] = sext_hwi (xval[len], suboffset); |
633 | len += 1; |
634 | } |
635 | return canonize (val, len, precision); |
636 | } |
637 | |
638 | /* Zero-extend the number represented by XVAL and XLEN into VAL, |
639 | starting at OFFSET. Return the number of blocks in VAL. Both XVAL |
640 | and VAL have PRECISION bits. */ |
641 | unsigned int |
642 | wi::zext_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
643 | unsigned int xlen, unsigned int precision, unsigned int offset) |
644 | { |
645 | unsigned int len = offset / HOST_BITS_PER_WIDE_INT64; |
646 | /* Extending beyond the precision is a no-op. If we have only stored |
647 | OFFSET bits or fewer, and the upper stored bit is zero, then there |
648 | is nothing to do. */ |
649 | if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0)) |
650 | { |
651 | for (unsigned i = 0; i < xlen; ++i) |
652 | val[i] = xval[i]; |
653 | return xlen; |
654 | } |
655 | unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT64; |
656 | for (unsigned int i = 0; i < len; i++) |
657 | val[i] = i < xlen ? xval[i] : -1; |
658 | if (suboffset > 0) |
659 | val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset); |
660 | else |
661 | val[len] = 0; |
662 | return canonize (val, len + 1, precision); |
663 | } |
664 | |
665 | /* |
666 | * Masking, inserting, shifting, rotating. |
667 | */ |
668 | |
669 | /* Insert WIDTH bits from Y into X starting at START. */ |
670 | wide_int |
671 | wi::insert (const wide_int &x, const wide_int &y, unsigned int start, |
672 | unsigned int width) |
673 | { |
674 | wide_int result; |
675 | wide_int mask; |
676 | wide_int tmp; |
677 | |
678 | unsigned int precision = x.get_precision (); |
679 | if (start >= precision) |
680 | return x; |
681 | |
682 | gcc_checking_assert (precision >= width)((void)(!(precision >= width) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 682, __FUNCTION__), 0 : 0)); |
683 | |
684 | if (start + width >= precision) |
685 | width = precision - start; |
686 | |
687 | mask = wi::shifted_mask (start, width, false, precision); |
688 | tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start); |
689 | result = tmp & mask; |
690 | |
691 | tmp = wi::bit_and_not (x, mask); |
692 | result = result | tmp; |
693 | |
694 | return result; |
695 | } |
696 | |
697 | /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT. |
698 | Return the number of blocks in VAL. Both XVAL and VAL have PRECISION |
699 | bits. */ |
700 | unsigned int |
701 | wi::set_bit_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
702 | unsigned int xlen, unsigned int precision, unsigned int bit) |
703 | { |
704 | unsigned int block = bit / HOST_BITS_PER_WIDE_INT64; |
705 | unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT64; |
706 | |
707 | if (block + 1 >= xlen) |
708 | { |
709 | /* The operation either affects the last current block or needs |
710 | a new block. */ |
711 | unsigned int len = block + 1; |
712 | for (unsigned int i = 0; i < len; i++) |
713 | val[i] = safe_uhwi (xval, xlen, i); |
714 | val[block] |= HOST_WIDE_INT_1U1UL << subbit; |
715 | |
716 | /* If the bit we just set is at the msb of the block, make sure |
717 | that any higher bits are zeros. */ |
718 | if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT64 - 1) |
719 | { |
720 | val[len++] = 0; |
721 | return len; |
722 | } |
723 | return canonize (val, len, precision); |
724 | } |
725 | else |
726 | { |
727 | for (unsigned int i = 0; i < xlen; i++) |
728 | val[i] = xval[i]; |
729 | val[block] |= HOST_WIDE_INT_1U1UL << subbit; |
730 | return canonize (val, xlen, precision); |
731 | } |
732 | } |
733 | |
734 | /* bswap THIS. */ |
735 | wide_int |
736 | wide_int_storage::bswap () const |
737 | { |
738 | wide_int result = wide_int::create (precision); |
739 | unsigned int i, s; |
740 | unsigned int len = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
741 | unsigned int xlen = get_len (); |
742 | const HOST_WIDE_INTlong *xval = get_val (); |
743 | HOST_WIDE_INTlong *val = result.write_val (); |
744 | |
745 | /* This is not a well defined operation if the precision is not a |
746 | multiple of 8. */ |
747 | gcc_assert ((precision & 0x7) == 0)((void)(!((precision & 0x7) == 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 747, __FUNCTION__), 0 : 0)); |
748 | |
749 | for (i = 0; i < len; i++) |
750 | val[i] = 0; |
751 | |
752 | /* Only swap the bytes that are not the padding. */ |
753 | for (s = 0; s < precision; s += 8) |
754 | { |
755 | unsigned int d = precision - s - 8; |
756 | unsigned HOST_WIDE_INTlong byte; |
757 | |
758 | unsigned int block = s / HOST_BITS_PER_WIDE_INT64; |
759 | unsigned int offset = s & (HOST_BITS_PER_WIDE_INT64 - 1); |
760 | |
761 | byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff; |
762 | |
763 | block = d / HOST_BITS_PER_WIDE_INT64; |
764 | offset = d & (HOST_BITS_PER_WIDE_INT64 - 1); |
765 | |
766 | val[block] |= byte << offset; |
767 | } |
768 | |
769 | result.set_len (canonize (val, len, precision)); |
770 | return result; |
771 | } |
772 | |
773 | /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits |
774 | above that up to PREC are zeros. The result is inverted if NEGATE |
775 | is true. Return the number of blocks in VAL. */ |
776 | unsigned int |
777 | wi::mask (HOST_WIDE_INTlong *val, unsigned int width, bool negate, |
778 | unsigned int prec) |
779 | { |
780 | if (width >= prec) |
781 | { |
782 | val[0] = negate ? 0 : -1; |
783 | return 1; |
784 | } |
785 | else if (width == 0) |
786 | { |
787 | val[0] = negate ? -1 : 0; |
788 | return 1; |
789 | } |
790 | |
791 | unsigned int i = 0; |
792 | while (i < width / HOST_BITS_PER_WIDE_INT64) |
793 | val[i++] = negate ? 0 : -1; |
794 | |
795 | unsigned int shift = width & (HOST_BITS_PER_WIDE_INT64 - 1); |
796 | if (shift != 0) |
797 | { |
798 | HOST_WIDE_INTlong last = (HOST_WIDE_INT_1U1UL << shift) - 1; |
799 | val[i++] = negate ? ~last : last; |
800 | } |
801 | else |
802 | val[i++] = negate ? -1 : 0; |
803 | |
804 | return i; |
805 | } |
806 | |
807 | /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH |
808 | bits are ones, and the bits above that up to PREC are zeros. The result |
809 | is inverted if NEGATE is true. Return the number of blocks in VAL. */ |
810 | unsigned int |
811 | wi::shifted_mask (HOST_WIDE_INTlong *val, unsigned int start, unsigned int width, |
812 | bool negate, unsigned int prec) |
813 | { |
814 | if (start >= prec || width == 0) |
815 | { |
816 | val[0] = negate ? -1 : 0; |
817 | return 1; |
818 | } |
819 | |
820 | if (width > prec - start) |
821 | width = prec - start; |
822 | unsigned int end = start + width; |
823 | |
824 | unsigned int i = 0; |
825 | while (i < start / HOST_BITS_PER_WIDE_INT64) |
826 | val[i++] = negate ? -1 : 0; |
827 | |
828 | unsigned int shift = start & (HOST_BITS_PER_WIDE_INT64 - 1); |
829 | if (shift) |
830 | { |
831 | HOST_WIDE_INTlong block = (HOST_WIDE_INT_1U1UL << shift) - 1; |
832 | shift += width; |
833 | if (shift < HOST_BITS_PER_WIDE_INT64) |
834 | { |
835 | /* case 000111000 */ |
836 | block = (HOST_WIDE_INT_1U1UL << shift) - block - 1; |
837 | val[i++] = negate ? ~block : block; |
838 | return i; |
839 | } |
840 | else |
841 | /* ...111000 */ |
842 | val[i++] = negate ? block : ~block; |
843 | } |
844 | |
845 | if (end >= prec) |
846 | { |
847 | if (!shift) |
848 | val[i++] = negate ? 0 : -1; |
849 | return i; |
850 | } |
851 | |
852 | while (i < end / HOST_BITS_PER_WIDE_INT64) |
853 | /* 1111111 */ |
854 | val[i++] = negate ? 0 : -1; |
855 | |
856 | shift = end & (HOST_BITS_PER_WIDE_INT64 - 1); |
857 | if (shift != 0) |
858 | { |
859 | /* 000011111 */ |
860 | HOST_WIDE_INTlong block = (HOST_WIDE_INT_1U1UL << shift) - 1; |
861 | val[i++] = negate ? ~block : block; |
862 | } |
863 | else |
864 | val[i++] = negate ? -1 : 0; |
865 | |
866 | return i; |
867 | } |
868 | |
869 | /* |
870 | * logical operations. |
871 | */ |
872 | |
873 | /* Set VAL to OP0 & OP1. Return the number of blocks used. */ |
874 | unsigned int |
875 | wi::and_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *op0, |
876 | unsigned int op0len, const HOST_WIDE_INTlong *op1, |
877 | unsigned int op1len, unsigned int prec) |
878 | { |
879 | int l0 = op0len - 1; |
880 | int l1 = op1len - 1; |
881 | bool need_canon = true; |
882 | |
883 | unsigned int len = MAX (op0len, op1len)((op0len) > (op1len) ? (op0len) : (op1len)); |
884 | if (l0 > l1) |
885 | { |
886 | HOST_WIDE_INTlong op1mask = -top_bit_of (op1, op1len, prec); |
887 | if (op1mask == 0) |
888 | { |
889 | l0 = l1; |
890 | len = l1 + 1; |
891 | } |
892 | else |
893 | { |
894 | need_canon = false; |
895 | while (l0 > l1) |
896 | { |
897 | val[l0] = op0[l0]; |
898 | l0--; |
899 | } |
900 | } |
901 | } |
902 | else if (l1 > l0) |
903 | { |
904 | HOST_WIDE_INTlong op0mask = -top_bit_of (op0, op0len, prec); |
905 | if (op0mask == 0) |
906 | len = l0 + 1; |
907 | else |
908 | { |
909 | need_canon = false; |
910 | while (l1 > l0) |
911 | { |
912 | val[l1] = op1[l1]; |
913 | l1--; |
914 | } |
915 | } |
916 | } |
917 | |
918 | while (l0 >= 0) |
919 | { |
920 | val[l0] = op0[l0] & op1[l0]; |
921 | l0--; |
922 | } |
923 | |
924 | if (need_canon) |
925 | len = canonize (val, len, prec); |
926 | |
927 | return len; |
928 | } |
929 | |
930 | /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */ |
931 | unsigned int |
932 | wi::and_not_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *op0, |
933 | unsigned int op0len, const HOST_WIDE_INTlong *op1, |
934 | unsigned int op1len, unsigned int prec) |
935 | { |
936 | wide_int result; |
937 | int l0 = op0len - 1; |
938 | int l1 = op1len - 1; |
939 | bool need_canon = true; |
940 | |
941 | unsigned int len = MAX (op0len, op1len)((op0len) > (op1len) ? (op0len) : (op1len)); |
942 | if (l0 > l1) |
943 | { |
944 | HOST_WIDE_INTlong op1mask = -top_bit_of (op1, op1len, prec); |
945 | if (op1mask != 0) |
946 | { |
947 | l0 = l1; |
948 | len = l1 + 1; |
949 | } |
950 | else |
951 | { |
952 | need_canon = false; |
953 | while (l0 > l1) |
954 | { |
955 | val[l0] = op0[l0]; |
956 | l0--; |
957 | } |
958 | } |
959 | } |
960 | else if (l1 > l0) |
961 | { |
962 | HOST_WIDE_INTlong op0mask = -top_bit_of (op0, op0len, prec); |
963 | if (op0mask == 0) |
964 | len = l0 + 1; |
965 | else |
966 | { |
967 | need_canon = false; |
968 | while (l1 > l0) |
969 | { |
970 | val[l1] = ~op1[l1]; |
971 | l1--; |
972 | } |
973 | } |
974 | } |
975 | |
976 | while (l0 >= 0) |
977 | { |
978 | val[l0] = op0[l0] & ~op1[l0]; |
979 | l0--; |
980 | } |
981 | |
982 | if (need_canon) |
983 | len = canonize (val, len, prec); |
984 | |
985 | return len; |
986 | } |
987 | |
988 | /* Set VAL to OP0 | OP1. Return the number of blocks used. */ |
989 | unsigned int |
990 | wi::or_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *op0, |
991 | unsigned int op0len, const HOST_WIDE_INTlong *op1, |
992 | unsigned int op1len, unsigned int prec) |
993 | { |
994 | wide_int result; |
995 | int l0 = op0len - 1; |
996 | int l1 = op1len - 1; |
997 | bool need_canon = true; |
998 | |
999 | unsigned int len = MAX (op0len, op1len)((op0len) > (op1len) ? (op0len) : (op1len)); |
1000 | if (l0 > l1) |
1001 | { |
1002 | HOST_WIDE_INTlong op1mask = -top_bit_of (op1, op1len, prec); |
1003 | if (op1mask != 0) |
1004 | { |
1005 | l0 = l1; |
1006 | len = l1 + 1; |
1007 | } |
1008 | else |
1009 | { |
1010 | need_canon = false; |
1011 | while (l0 > l1) |
1012 | { |
1013 | val[l0] = op0[l0]; |
1014 | l0--; |
1015 | } |
1016 | } |
1017 | } |
1018 | else if (l1 > l0) |
1019 | { |
1020 | HOST_WIDE_INTlong op0mask = -top_bit_of (op0, op0len, prec); |
1021 | if (op0mask != 0) |
1022 | len = l0 + 1; |
1023 | else |
1024 | { |
1025 | need_canon = false; |
1026 | while (l1 > l0) |
1027 | { |
1028 | val[l1] = op1[l1]; |
1029 | l1--; |
1030 | } |
1031 | } |
1032 | } |
1033 | |
1034 | while (l0 >= 0) |
1035 | { |
1036 | val[l0] = op0[l0] | op1[l0]; |
1037 | l0--; |
1038 | } |
1039 | |
1040 | if (need_canon) |
1041 | len = canonize (val, len, prec); |
1042 | |
1043 | return len; |
1044 | } |
1045 | |
1046 | /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */ |
1047 | unsigned int |
1048 | wi::or_not_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *op0, |
1049 | unsigned int op0len, const HOST_WIDE_INTlong *op1, |
1050 | unsigned int op1len, unsigned int prec) |
1051 | { |
1052 | wide_int result; |
1053 | int l0 = op0len - 1; |
1054 | int l1 = op1len - 1; |
1055 | bool need_canon = true; |
1056 | |
1057 | unsigned int len = MAX (op0len, op1len)((op0len) > (op1len) ? (op0len) : (op1len)); |
1058 | if (l0 > l1) |
1059 | { |
1060 | HOST_WIDE_INTlong op1mask = -top_bit_of (op1, op1len, prec); |
1061 | if (op1mask == 0) |
1062 | { |
1063 | l0 = l1; |
1064 | len = l1 + 1; |
1065 | } |
1066 | else |
1067 | { |
1068 | need_canon = false; |
1069 | while (l0 > l1) |
1070 | { |
1071 | val[l0] = op0[l0]; |
1072 | l0--; |
1073 | } |
1074 | } |
1075 | } |
1076 | else if (l1 > l0) |
1077 | { |
1078 | HOST_WIDE_INTlong op0mask = -top_bit_of (op0, op0len, prec); |
1079 | if (op0mask != 0) |
1080 | len = l0 + 1; |
1081 | else |
1082 | { |
1083 | need_canon = false; |
1084 | while (l1 > l0) |
1085 | { |
1086 | val[l1] = ~op1[l1]; |
1087 | l1--; |
1088 | } |
1089 | } |
1090 | } |
1091 | |
1092 | while (l0 >= 0) |
1093 | { |
1094 | val[l0] = op0[l0] | ~op1[l0]; |
1095 | l0--; |
1096 | } |
1097 | |
1098 | if (need_canon) |
1099 | len = canonize (val, len, prec); |
1100 | |
1101 | return len; |
1102 | } |
1103 | |
1104 | /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */ |
1105 | unsigned int |
1106 | wi::xor_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *op0, |
1107 | unsigned int op0len, const HOST_WIDE_INTlong *op1, |
1108 | unsigned int op1len, unsigned int prec) |
1109 | { |
1110 | wide_int result; |
1111 | int l0 = op0len - 1; |
1112 | int l1 = op1len - 1; |
1113 | |
1114 | unsigned int len = MAX (op0len, op1len)((op0len) > (op1len) ? (op0len) : (op1len)); |
1115 | if (l0 > l1) |
1116 | { |
1117 | HOST_WIDE_INTlong op1mask = -top_bit_of (op1, op1len, prec); |
1118 | while (l0 > l1) |
1119 | { |
1120 | val[l0] = op0[l0] ^ op1mask; |
1121 | l0--; |
1122 | } |
1123 | } |
1124 | |
1125 | if (l1 > l0) |
1126 | { |
1127 | HOST_WIDE_INTlong op0mask = -top_bit_of (op0, op0len, prec); |
1128 | while (l1 > l0) |
1129 | { |
1130 | val[l1] = op0mask ^ op1[l1]; |
1131 | l1--; |
1132 | } |
1133 | } |
1134 | |
1135 | while (l0 >= 0) |
1136 | { |
1137 | val[l0] = op0[l0] ^ op1[l0]; |
1138 | l0--; |
1139 | } |
1140 | |
1141 | return canonize (val, len, prec); |
1142 | } |
1143 | |
1144 | /* |
1145 | * math |
1146 | */ |
1147 | |
1148 | /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW |
1149 | whether the result overflows when OP0 and OP1 are treated as having |
1150 | signedness SGN. Return the number of blocks in VAL. */ |
1151 | unsigned int |
1152 | wi::add_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *op0, |
1153 | unsigned int op0len, const HOST_WIDE_INTlong *op1, |
1154 | unsigned int op1len, unsigned int prec, |
1155 | signop sgn, wi::overflow_type *overflow) |
1156 | { |
1157 | unsigned HOST_WIDE_INTlong o0 = 0; |
1158 | unsigned HOST_WIDE_INTlong o1 = 0; |
1159 | unsigned HOST_WIDE_INTlong x = 0; |
1160 | unsigned HOST_WIDE_INTlong carry = 0; |
1161 | unsigned HOST_WIDE_INTlong old_carry = 0; |
1162 | unsigned HOST_WIDE_INTlong mask0, mask1; |
1163 | unsigned int i; |
1164 | |
1165 | unsigned int len = MAX (op0len, op1len)((op0len) > (op1len) ? (op0len) : (op1len)); |
1166 | mask0 = -top_bit_of (op0, op0len, prec); |
1167 | mask1 = -top_bit_of (op1, op1len, prec); |
1168 | /* Add all of the explicitly defined elements. */ |
1169 | |
1170 | for (i = 0; i < len; i++) |
1171 | { |
1172 | o0 = i < op0len ? (unsigned HOST_WIDE_INTlong) op0[i] : mask0; |
1173 | o1 = i < op1len ? (unsigned HOST_WIDE_INTlong) op1[i] : mask1; |
1174 | x = o0 + o1 + carry; |
1175 | val[i] = x; |
1176 | old_carry = carry; |
1177 | carry = carry == 0 ? x < o0 : x <= o0; |
1178 | } |
1179 | |
1180 | if (len * HOST_BITS_PER_WIDE_INT64 < prec) |
1181 | { |
1182 | val[len] = mask0 + mask1 + carry; |
1183 | len++; |
1184 | if (overflow) |
1185 | *overflow |
1186 | = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE; |
1187 | } |
1188 | else if (overflow) |
1189 | { |
1190 | unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT64; |
1191 | if (sgn == SIGNED) |
1192 | { |
1193 | unsigned HOST_WIDE_INTlong x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1); |
1194 | if ((HOST_WIDE_INTlong) (x << shift) < 0) |
1195 | { |
1196 | if (o0 > (unsigned HOST_WIDE_INTlong) val[len - 1]) |
1197 | *overflow = wi::OVF_UNDERFLOW; |
1198 | else if (o0 < (unsigned HOST_WIDE_INTlong) val[len - 1]) |
1199 | *overflow = wi::OVF_OVERFLOW; |
1200 | else |
1201 | *overflow = wi::OVF_NONE; |
1202 | } |
1203 | else |
1204 | *overflow = wi::OVF_NONE; |
1205 | } |
1206 | else |
1207 | { |
1208 | /* Put the MSB of X and O0 and in the top of the HWI. */ |
1209 | x <<= shift; |
1210 | o0 <<= shift; |
1211 | if (old_carry) |
1212 | *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE; |
1213 | else |
1214 | *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE; |
1215 | } |
1216 | } |
1217 | |
1218 | return canonize (val, len, prec); |
1219 | } |
1220 | |
1221 | /* Subroutines of the multiplication and division operations. Unpack |
1222 | the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN |
1223 | HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by |
1224 | uncompressing the top bit of INPUT[IN_LEN - 1]. */ |
1225 | static void |
1226 | wi_unpack (unsigned HOST_HALF_WIDE_INTint *result, const HOST_WIDE_INTlong *input, |
1227 | unsigned int in_len, unsigned int out_len, |
1228 | unsigned int prec, signop sgn) |
1229 | { |
1230 | unsigned int i; |
1231 | unsigned int j = 0; |
1232 | unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT64 - 1); |
1233 | unsigned int blocks_needed = BLOCKS_NEEDED (prec)(prec ? (((prec) + 64 - 1) / 64) : 1); |
1234 | HOST_WIDE_INTlong mask; |
1235 | |
1236 | if (sgn == SIGNED) |
1237 | { |
1238 | mask = -top_bit_of ((const HOST_WIDE_INTlong *) input, in_len, prec); |
1239 | mask &= HALF_INT_MASK((1L << 32) - 1); |
1240 | } |
1241 | else |
1242 | mask = 0; |
1243 | |
1244 | for (i = 0; i < blocks_needed - 1; i++) |
1245 | { |
1246 | HOST_WIDE_INTlong x = safe_uhwi (input, in_len, i); |
1247 | result[j++] = x; |
1248 | result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT32; |
1249 | } |
1250 | |
1251 | HOST_WIDE_INTlong x = safe_uhwi (input, in_len, i); |
1252 | if (small_prec) |
1253 | { |
1254 | if (sgn == SIGNED) |
1255 | x = sext_hwi (x, small_prec); |
1256 | else |
1257 | x = zext_hwi (x, small_prec); |
1258 | } |
1259 | result[j++] = x; |
1260 | result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT32; |
1261 | |
1262 | /* Smear the sign bit. */ |
1263 | while (j < out_len) |
1264 | result[j++] = mask; |
1265 | } |
1266 | |
1267 | /* The inverse of wi_unpack. IN_LEN is the number of input |
1268 | blocks and PRECISION is the precision of the result. Return the |
1269 | number of blocks in the canonicalized result. */ |
1270 | static unsigned int |
1271 | wi_pack (HOST_WIDE_INTlong *result, |
1272 | const unsigned HOST_HALF_WIDE_INTint *input, |
1273 | unsigned int in_len, unsigned int precision) |
1274 | { |
1275 | unsigned int i = 0; |
1276 | unsigned int j = 0; |
1277 | unsigned int blocks_needed = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
1278 | |
1279 | while (i + 1 < in_len) |
1280 | { |
1281 | result[j++] = ((unsigned HOST_WIDE_INTlong) input[i] |
1282 | | ((unsigned HOST_WIDE_INTlong) input[i + 1] |
1283 | << HOST_BITS_PER_HALF_WIDE_INT32)); |
1284 | i += 2; |
1285 | } |
1286 | |
1287 | /* Handle the case where in_len is odd. For this we zero extend. */ |
1288 | if (in_len & 1) |
1289 | result[j++] = (unsigned HOST_WIDE_INTlong) input[i]; |
1290 | else if (j < blocks_needed) |
1291 | result[j++] = 0; |
1292 | return canonize (result, j, precision); |
1293 | } |
1294 | |
1295 | /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the |
1296 | result is returned. |
1297 | |
1298 | If HIGH is not set, throw away the upper half after the check is |
1299 | made to see if it overflows. Unfortunately there is no better way |
1300 | to check for overflow than to do this. If OVERFLOW is nonnull, |
1301 | record in *OVERFLOW whether the result overflowed. SGN controls |
1302 | the signedness and is used to check overflow or if HIGH is set. |
1303 | |
1304 | NOTE: Overflow type for signed overflow is not yet implemented. */ |
1305 | unsigned int |
1306 | wi::mul_internal (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *op1val, |
1307 | unsigned int op1len, const HOST_WIDE_INTlong *op2val, |
1308 | unsigned int op2len, unsigned int prec, signop sgn, |
1309 | wi::overflow_type *overflow, bool high) |
1310 | { |
1311 | unsigned HOST_WIDE_INTlong o0, o1, k, t; |
1312 | unsigned int i; |
1313 | unsigned int j; |
1314 | unsigned int blocks_needed = BLOCKS_NEEDED (prec)(prec ? (((prec) + 64 - 1) / 64) : 1); |
1315 | unsigned int half_blocks_needed = blocks_needed * 2; |
1316 | /* The sizes here are scaled to support a 2x largest mode by 2x |
1317 | largest mode yielding a 4x largest mode result. This is what is |
1318 | needed by vpn. */ |
1319 | |
1320 | unsigned HOST_HALF_WIDE_INTint |
1321 | u[4 * MAX_BITSIZE_MODE_ANY_INT(64*(8)) / HOST_BITS_PER_HALF_WIDE_INT32]; |
1322 | unsigned HOST_HALF_WIDE_INTint |
1323 | v[4 * MAX_BITSIZE_MODE_ANY_INT(64*(8)) / HOST_BITS_PER_HALF_WIDE_INT32]; |
1324 | /* The '2' in 'R' is because we are internally doing a full |
1325 | multiply. */ |
1326 | unsigned HOST_HALF_WIDE_INTint |
1327 | r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT(64*(8)) / HOST_BITS_PER_HALF_WIDE_INT32]; |
1328 | HOST_WIDE_INTlong mask = ((HOST_WIDE_INTlong)1 << HOST_BITS_PER_HALF_WIDE_INT32) - 1; |
1329 | |
1330 | /* If the top level routine did not really pass in an overflow, then |
1331 | just make sure that we never attempt to set it. */ |
1332 | bool needs_overflow = (overflow != 0); |
1333 | if (needs_overflow) |
1334 | *overflow = wi::OVF_NONE; |
1335 | |
1336 | wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec); |
1337 | wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec); |
1338 | |
1339 | /* This is a surprisingly common case, so do it first. */ |
1340 | if (op1 == 0 || op2 == 0) |
1341 | { |
1342 | val[0] = 0; |
1343 | return 1; |
1344 | } |
1345 | |
1346 | #ifdef umul_ppmm |
1347 | if (sgn == UNSIGNED) |
1348 | { |
1349 | /* If the inputs are single HWIs and the output has room for at |
1350 | least two HWIs, we can use umul_ppmm directly. */ |
1351 | if (prec >= HOST_BITS_PER_WIDE_INT64 * 2 |
1352 | && wi::fits_uhwi_p (op1) |
1353 | && wi::fits_uhwi_p (op2)) |
1354 | { |
1355 | /* This case never overflows. */ |
1356 | if (high) |
1357 | { |
1358 | val[0] = 0; |
1359 | return 1; |
1360 | } |
1361 | umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ()); |
1362 | if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT64 * 2) |
1363 | { |
1364 | val[2] = 0; |
1365 | return 3; |
1366 | } |
1367 | return 1 + (val[1] != 0 || val[0] < 0); |
1368 | } |
1369 | /* Likewise if the output is a full single HWI, except that the |
1370 | upper HWI of the result is only used for determining overflow. |
1371 | (We handle this case inline when overflow isn't needed.) */ |
1372 | else if (prec == HOST_BITS_PER_WIDE_INT64) |
1373 | { |
1374 | unsigned HOST_WIDE_INTlong upper; |
1375 | umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ()); |
1376 | if (needs_overflow) |
1377 | /* Unsigned overflow can only be +OVERFLOW. */ |
1378 | *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE; |
1379 | if (high) |
1380 | val[0] = upper; |
1381 | return 1; |
1382 | } |
1383 | } |
1384 | #endif |
1385 | |
1386 | /* Handle multiplications by 1. */ |
1387 | if (op1 == 1) |
1388 | { |
1389 | if (high) |
1390 | { |
1391 | val[0] = wi::neg_p (op2, sgn) ? -1 : 0; |
1392 | return 1; |
1393 | } |
1394 | for (i = 0; i < op2len; i++) |
1395 | val[i] = op2val[i]; |
1396 | return op2len; |
1397 | } |
1398 | if (op2 == 1) |
1399 | { |
1400 | if (high) |
1401 | { |
1402 | val[0] = wi::neg_p (op1, sgn) ? -1 : 0; |
1403 | return 1; |
1404 | } |
1405 | for (i = 0; i < op1len; i++) |
1406 | val[i] = op1val[i]; |
1407 | return op1len; |
1408 | } |
1409 | |
1410 | /* If we need to check for overflow, we can only do half wide |
1411 | multiplies quickly because we need to look at the top bits to |
1412 | check for the overflow. */ |
1413 | if ((high || needs_overflow) |
1414 | && (prec <= HOST_BITS_PER_HALF_WIDE_INT32)) |
1415 | { |
1416 | unsigned HOST_WIDE_INTlong r; |
1417 | |
1418 | if (sgn == SIGNED) |
1419 | { |
1420 | o0 = op1.to_shwi (); |
1421 | o1 = op2.to_shwi (); |
1422 | } |
1423 | else |
1424 | { |
1425 | o0 = op1.to_uhwi (); |
1426 | o1 = op2.to_uhwi (); |
1427 | } |
1428 | |
1429 | r = o0 * o1; |
1430 | if (needs_overflow) |
1431 | { |
1432 | if (sgn == SIGNED) |
1433 | { |
1434 | if ((HOST_WIDE_INTlong) r != sext_hwi (r, prec)) |
1435 | /* FIXME: Signed overflow type is not implemented yet. */ |
1436 | *overflow = OVF_UNKNOWN; |
1437 | } |
1438 | else |
1439 | { |
1440 | if ((r >> prec) != 0) |
1441 | /* Unsigned overflow can only be +OVERFLOW. */ |
1442 | *overflow = OVF_OVERFLOW; |
1443 | } |
1444 | } |
1445 | val[0] = high ? r >> prec : r; |
1446 | return 1; |
1447 | } |
1448 | |
1449 | /* We do unsigned mul and then correct it. */ |
1450 | wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED); |
1451 | wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED); |
1452 | |
1453 | /* The 2 is for a full mult. */ |
1454 | memset (r, 0, half_blocks_needed * 2 |
1455 | * HOST_BITS_PER_HALF_WIDE_INT32 / CHAR_BIT8); |
1456 | |
1457 | for (j = 0; j < half_blocks_needed; j++) |
1458 | { |
1459 | k = 0; |
1460 | for (i = 0; i < half_blocks_needed; i++) |
1461 | { |
1462 | t = ((unsigned HOST_WIDE_INTlong)u[i] * (unsigned HOST_WIDE_INTlong)v[j] |
1463 | + r[i + j] + k); |
1464 | r[i + j] = t & HALF_INT_MASK((1L << 32) - 1); |
1465 | k = t >> HOST_BITS_PER_HALF_WIDE_INT32; |
1466 | } |
1467 | r[j + half_blocks_needed] = k; |
1468 | } |
1469 | |
1470 | /* We did unsigned math above. For signed we must adjust the |
1471 | product (assuming we need to see that). */ |
1472 | if (sgn == SIGNED && (high || needs_overflow)) |
1473 | { |
1474 | unsigned HOST_WIDE_INTlong b; |
1475 | if (wi::neg_p (op1)) |
1476 | { |
1477 | b = 0; |
1478 | for (i = 0; i < half_blocks_needed; i++) |
1479 | { |
1480 | t = (unsigned HOST_WIDE_INTlong)r[i + half_blocks_needed] |
1481 | - (unsigned HOST_WIDE_INTlong)v[i] - b; |
1482 | r[i + half_blocks_needed] = t & HALF_INT_MASK((1L << 32) - 1); |
1483 | b = t >> (HOST_BITS_PER_WIDE_INT64 - 1); |
1484 | } |
1485 | } |
1486 | if (wi::neg_p (op2)) |
1487 | { |
1488 | b = 0; |
1489 | for (i = 0; i < half_blocks_needed; i++) |
1490 | { |
1491 | t = (unsigned HOST_WIDE_INTlong)r[i + half_blocks_needed] |
1492 | - (unsigned HOST_WIDE_INTlong)u[i] - b; |
1493 | r[i + half_blocks_needed] = t & HALF_INT_MASK((1L << 32) - 1); |
1494 | b = t >> (HOST_BITS_PER_WIDE_INT64 - 1); |
1495 | } |
1496 | } |
1497 | } |
1498 | |
1499 | if (needs_overflow) |
1500 | { |
1501 | HOST_WIDE_INTlong top; |
1502 | |
1503 | /* For unsigned, overflow is true if any of the top bits are set. |
1504 | For signed, overflow is true if any of the top bits are not equal |
1505 | to the sign bit. */ |
1506 | if (sgn == UNSIGNED) |
1507 | top = 0; |
1508 | else |
1509 | { |
1510 | top = r[(half_blocks_needed) - 1]; |
1511 | top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2))((long) (top << (64 / 2)) < 0 ? -1 : 0); |
1512 | top &= mask; |
1513 | } |
1514 | |
1515 | for (i = half_blocks_needed; i < half_blocks_needed * 2; i++) |
1516 | if (((HOST_WIDE_INTlong)(r[i] & mask)) != top) |
1517 | /* FIXME: Signed overflow type is not implemented yet. */ |
1518 | *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN; |
1519 | } |
1520 | |
1521 | int r_offset = high ? half_blocks_needed : 0; |
1522 | return wi_pack (val, &r[r_offset], half_blocks_needed, prec); |
1523 | } |
1524 | |
1525 | /* Compute the population count of X. */ |
1526 | int |
1527 | wi::popcount (const wide_int_ref &x) |
1528 | { |
1529 | unsigned int i; |
1530 | int count; |
1531 | |
1532 | /* The high order block is special if it is the last block and the |
1533 | precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We |
1534 | have to clear out any ones above the precision before doing |
1535 | popcount on this block. */ |
1536 | count = x.precision - x.len * HOST_BITS_PER_WIDE_INT64; |
1537 | unsigned int stop = x.len; |
1538 | if (count < 0) |
1539 | { |
1540 | count = popcount_hwi (x.uhigh () << -count); |
1541 | stop -= 1; |
1542 | } |
1543 | else |
1544 | { |
1545 | if (x.sign_mask () >= 0) |
1546 | count = 0; |
1547 | } |
1548 | |
1549 | for (i = 0; i < stop; ++i) |
1550 | count += popcount_hwi (x.val[i]); |
1551 | |
1552 | return count; |
1553 | } |
1554 | |
1555 | /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW |
1556 | whether the result overflows when OP0 and OP1 are treated as having |
1557 | signedness SGN. Return the number of blocks in VAL. */ |
1558 | unsigned int |
1559 | wi::sub_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *op0, |
1560 | unsigned int op0len, const HOST_WIDE_INTlong *op1, |
1561 | unsigned int op1len, unsigned int prec, |
1562 | signop sgn, wi::overflow_type *overflow) |
1563 | { |
1564 | unsigned HOST_WIDE_INTlong o0 = 0; |
1565 | unsigned HOST_WIDE_INTlong o1 = 0; |
1566 | unsigned HOST_WIDE_INTlong x = 0; |
1567 | /* We implement subtraction as an in place negate and add. Negation |
1568 | is just inversion and add 1, so we can do the add of 1 by just |
1569 | starting the borrow in of the first element at 1. */ |
1570 | unsigned HOST_WIDE_INTlong borrow = 0; |
1571 | unsigned HOST_WIDE_INTlong old_borrow = 0; |
1572 | |
1573 | unsigned HOST_WIDE_INTlong mask0, mask1; |
1574 | unsigned int i; |
1575 | |
1576 | unsigned int len = MAX (op0len, op1len)((op0len) > (op1len) ? (op0len) : (op1len)); |
1577 | mask0 = -top_bit_of (op0, op0len, prec); |
1578 | mask1 = -top_bit_of (op1, op1len, prec); |
1579 | |
1580 | /* Subtract all of the explicitly defined elements. */ |
1581 | for (i = 0; i < len; i++) |
1582 | { |
1583 | o0 = i < op0len ? (unsigned HOST_WIDE_INTlong)op0[i] : mask0; |
1584 | o1 = i < op1len ? (unsigned HOST_WIDE_INTlong)op1[i] : mask1; |
1585 | x = o0 - o1 - borrow; |
1586 | val[i] = x; |
1587 | old_borrow = borrow; |
1588 | borrow = borrow == 0 ? o0 < o1 : o0 <= o1; |
1589 | } |
1590 | |
1591 | if (len * HOST_BITS_PER_WIDE_INT64 < prec) |
1592 | { |
1593 | val[len] = mask0 - mask1 - borrow; |
1594 | len++; |
1595 | if (overflow) |
1596 | *overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE; |
1597 | } |
1598 | else if (overflow) |
1599 | { |
1600 | unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT64; |
1601 | if (sgn == SIGNED) |
1602 | { |
1603 | unsigned HOST_WIDE_INTlong x = (o0 ^ o1) & (val[len - 1] ^ o0); |
1604 | if ((HOST_WIDE_INTlong) (x << shift) < 0) |
1605 | { |
1606 | if (o0 > o1) |
1607 | *overflow = OVF_UNDERFLOW; |
1608 | else if (o0 < o1) |
1609 | *overflow = OVF_OVERFLOW; |
1610 | else |
1611 | *overflow = OVF_NONE; |
1612 | } |
1613 | else |
1614 | *overflow = OVF_NONE; |
1615 | } |
1616 | else |
1617 | { |
1618 | /* Put the MSB of X and O0 and in the top of the HWI. */ |
1619 | x <<= shift; |
1620 | o0 <<= shift; |
1621 | if (old_borrow) |
1622 | *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE; |
1623 | else |
1624 | *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE; |
1625 | } |
1626 | } |
1627 | |
1628 | return canonize (val, len, prec); |
1629 | } |
1630 | |
1631 | |
1632 | /* |
1633 | * Division and Mod |
1634 | */ |
1635 | |
1636 | /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The |
1637 | algorithm is a small modification of the algorithm in Hacker's |
1638 | Delight by Warren, which itself is a small modification of Knuth's |
1639 | algorithm. M is the number of significant elements of U however |
1640 | there needs to be at least one extra element of B_DIVIDEND |
1641 | allocated, N is the number of elements of B_DIVISOR. */ |
1642 | static void |
1643 | divmod_internal_2 (unsigned HOST_HALF_WIDE_INTint *b_quotient, |
1644 | unsigned HOST_HALF_WIDE_INTint *b_remainder, |
1645 | unsigned HOST_HALF_WIDE_INTint *b_dividend, |
1646 | unsigned HOST_HALF_WIDE_INTint *b_divisor, |
1647 | int m, int n) |
1648 | { |
1649 | /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a |
1650 | HOST_WIDE_INT and stored in the lower bits of each word. This |
1651 | algorithm should work properly on both 32 and 64 bit |
1652 | machines. */ |
1653 | unsigned HOST_WIDE_INTlong b |
1654 | = (unsigned HOST_WIDE_INTlong)1 << HOST_BITS_PER_HALF_WIDE_INT32; |
1655 | unsigned HOST_WIDE_INTlong qhat; /* Estimate of quotient digit. */ |
1656 | unsigned HOST_WIDE_INTlong rhat; /* A remainder. */ |
1657 | unsigned HOST_WIDE_INTlong p; /* Product of two digits. */ |
1658 | HOST_WIDE_INTlong t, k; |
1659 | int i, j, s; |
1660 | |
1661 | /* Single digit divisor. */ |
1662 | if (n == 1) |
1663 | { |
1664 | k = 0; |
1665 | for (j = m - 1; j >= 0; j--) |
1666 | { |
1667 | b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0]; |
1668 | k = ((k * b + b_dividend[j]) |
1669 | - ((unsigned HOST_WIDE_INTlong)b_quotient[j] |
1670 | * (unsigned HOST_WIDE_INTlong)b_divisor[0])); |
1671 | } |
1672 | b_remainder[0] = k; |
1673 | return; |
1674 | } |
1675 | |
1676 | s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT32; /* CHECK clz */ |
1677 | |
1678 | if (s) |
1679 | { |
1680 | /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published |
1681 | algorithm, we can overwrite b_dividend and b_divisor, so we do |
1682 | that. */ |
1683 | for (i = n - 1; i > 0; i--) |
1684 | b_divisor[i] = (b_divisor[i] << s) |
1685 | | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT32 - s)); |
1686 | b_divisor[0] = b_divisor[0] << s; |
1687 | |
1688 | b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT32 - s); |
1689 | for (i = m - 1; i > 0; i--) |
1690 | b_dividend[i] = (b_dividend[i] << s) |
1691 | | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT32 - s)); |
1692 | b_dividend[0] = b_dividend[0] << s; |
1693 | } |
1694 | |
1695 | /* Main loop. */ |
1696 | for (j = m - n; j >= 0; j--) |
1697 | { |
1698 | qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1]; |
1699 | rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1]; |
1700 | again: |
1701 | if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2]) |
1702 | { |
1703 | qhat -= 1; |
1704 | rhat += b_divisor[n-1]; |
1705 | if (rhat < b) |
1706 | goto again; |
1707 | } |
1708 | |
1709 | /* Multiply and subtract. */ |
1710 | k = 0; |
1711 | for (i = 0; i < n; i++) |
1712 | { |
1713 | p = qhat * b_divisor[i]; |
1714 | t = b_dividend[i+j] - k - (p & HALF_INT_MASK((1L << 32) - 1)); |
1715 | b_dividend[i + j] = t; |
1716 | k = ((p >> HOST_BITS_PER_HALF_WIDE_INT32) |
1717 | - (t >> HOST_BITS_PER_HALF_WIDE_INT32)); |
1718 | } |
1719 | t = b_dividend[j+n] - k; |
1720 | b_dividend[j+n] = t; |
1721 | |
1722 | b_quotient[j] = qhat; |
1723 | if (t < 0) |
1724 | { |
1725 | b_quotient[j] -= 1; |
1726 | k = 0; |
1727 | for (i = 0; i < n; i++) |
1728 | { |
1729 | t = (HOST_WIDE_INTlong)b_dividend[i+j] + b_divisor[i] + k; |
1730 | b_dividend[i+j] = t; |
1731 | k = t >> HOST_BITS_PER_HALF_WIDE_INT32; |
1732 | } |
1733 | b_dividend[j+n] += k; |
1734 | } |
1735 | } |
1736 | if (s) |
1737 | for (i = 0; i < n; i++) |
1738 | b_remainder[i] = (b_dividend[i] >> s) |
1739 | | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT32 - s)); |
1740 | else |
1741 | for (i = 0; i < n; i++) |
1742 | b_remainder[i] = b_dividend[i]; |
1743 | } |
1744 | |
1745 | |
1746 | /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate |
1747 | the result. If QUOTIENT is nonnull, store the value of the quotient |
1748 | there and return the number of blocks in it. The return value is |
1749 | not defined otherwise. If REMAINDER is nonnull, store the value |
1750 | of the remainder there and store the number of blocks in |
1751 | *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether |
1752 | the division overflowed. */ |
1753 | unsigned int |
1754 | wi::divmod_internal (HOST_WIDE_INTlong *quotient, unsigned int *remainder_len, |
1755 | HOST_WIDE_INTlong *remainder, |
1756 | const HOST_WIDE_INTlong *dividend_val, |
1757 | unsigned int dividend_len, unsigned int dividend_prec, |
1758 | const HOST_WIDE_INTlong *divisor_val, unsigned int divisor_len, |
1759 | unsigned int divisor_prec, signop sgn, |
1760 | wi::overflow_type *oflow) |
1761 | { |
1762 | unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec)(dividend_prec ? (((dividend_prec) + 64 - 1) / 64) : 1); |
1763 | unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec)(divisor_prec ? (((divisor_prec) + 64 - 1) / 64) : 1); |
1764 | unsigned HOST_HALF_WIDE_INTint |
1765 | b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT(64*(8)) / HOST_BITS_PER_HALF_WIDE_INT32]; |
1766 | unsigned HOST_HALF_WIDE_INTint |
1767 | b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT(64*(8)) / HOST_BITS_PER_HALF_WIDE_INT32]; |
1768 | unsigned HOST_HALF_WIDE_INTint |
1769 | b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT(64*(8)) / HOST_BITS_PER_HALF_WIDE_INT32) + 1]; |
1770 | unsigned HOST_HALF_WIDE_INTint |
1771 | b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT(64*(8)) / HOST_BITS_PER_HALF_WIDE_INT32]; |
1772 | unsigned int m, n; |
1773 | bool dividend_neg = false; |
1774 | bool divisor_neg = false; |
1775 | bool overflow = false; |
1776 | wide_int neg_dividend, neg_divisor; |
1777 | |
1778 | wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len, |
1779 | dividend_prec); |
1780 | wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len, |
1781 | divisor_prec); |
1782 | if (divisor == 0) |
1783 | overflow = true; |
1784 | |
1785 | /* The smallest signed number / -1 causes overflow. The dividend_len |
1786 | check is for speed rather than correctness. */ |
1787 | if (sgn == SIGNED |
1788 | && dividend_len == BLOCKS_NEEDED (dividend_prec)(dividend_prec ? (((dividend_prec) + 64 - 1) / 64) : 1) |
1789 | && divisor == -1 |
1790 | && wi::only_sign_bit_p (dividend)) |
1791 | overflow = true; |
1792 | |
1793 | /* Handle the overflow cases. Viewed as unsigned value, the quotient of |
1794 | (signed min / -1) has the same representation as the orignal dividend. |
1795 | We have traditionally made division by zero act as division by one, |
1796 | so there too we use the original dividend. */ |
1797 | if (overflow) |
1798 | { |
1799 | if (remainder) |
1800 | { |
1801 | *remainder_len = 1; |
1802 | remainder[0] = 0; |
1803 | } |
1804 | if (oflow) |
1805 | *oflow = OVF_OVERFLOW; |
1806 | if (quotient) |
1807 | for (unsigned int i = 0; i < dividend_len; ++i) |
1808 | quotient[i] = dividend_val[i]; |
1809 | return dividend_len; |
1810 | } |
1811 | |
1812 | if (oflow) |
1813 | *oflow = OVF_NONE; |
1814 | |
1815 | /* Do it on the host if you can. */ |
1816 | if (sgn == SIGNED |
1817 | && wi::fits_shwi_p (dividend) |
1818 | && wi::fits_shwi_p (divisor)) |
1819 | { |
1820 | HOST_WIDE_INTlong o0 = dividend.to_shwi (); |
1821 | HOST_WIDE_INTlong o1 = divisor.to_shwi (); |
1822 | |
1823 | if (o0 == HOST_WIDE_INT_MIN(long) (1UL << (64 - 1)) && o1 == -1) |
1824 | { |
1825 | gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT)((void)(!(dividend_prec > 64) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 1825, __FUNCTION__), 0 : 0)); |
1826 | if (quotient) |
1827 | { |
1828 | quotient[0] = HOST_WIDE_INT_MIN(long) (1UL << (64 - 1)); |
1829 | quotient[1] = 0; |
1830 | } |
1831 | if (remainder) |
1832 | { |
1833 | remainder[0] = 0; |
1834 | *remainder_len = 1; |
1835 | } |
1836 | return 2; |
1837 | } |
1838 | else |
1839 | { |
1840 | if (quotient) |
1841 | quotient[0] = o0 / o1; |
1842 | if (remainder) |
1843 | { |
1844 | remainder[0] = o0 % o1; |
1845 | *remainder_len = 1; |
1846 | } |
1847 | return 1; |
1848 | } |
1849 | } |
1850 | |
1851 | if (sgn == UNSIGNED |
1852 | && wi::fits_uhwi_p (dividend) |
1853 | && wi::fits_uhwi_p (divisor)) |
1854 | { |
1855 | unsigned HOST_WIDE_INTlong o0 = dividend.to_uhwi (); |
1856 | unsigned HOST_WIDE_INTlong o1 = divisor.to_uhwi (); |
1857 | unsigned int quotient_len = 1; |
1858 | |
1859 | if (quotient) |
1860 | { |
1861 | quotient[0] = o0 / o1; |
1862 | quotient_len = canonize_uhwi (quotient, dividend_prec); |
1863 | } |
1864 | if (remainder) |
1865 | { |
1866 | remainder[0] = o0 % o1; |
1867 | *remainder_len = canonize_uhwi (remainder, dividend_prec); |
1868 | } |
1869 | return quotient_len; |
1870 | } |
1871 | |
1872 | /* Make the divisor and dividend positive and remember what we |
1873 | did. */ |
1874 | if (sgn == SIGNED) |
1875 | { |
1876 | if (wi::neg_p (dividend)) |
1877 | { |
1878 | neg_dividend = -dividend; |
1879 | dividend = neg_dividend; |
1880 | dividend_neg = true; |
1881 | } |
1882 | if (wi::neg_p (divisor)) |
1883 | { |
1884 | neg_divisor = -divisor; |
1885 | divisor = neg_divisor; |
1886 | divisor_neg = true; |
1887 | } |
1888 | } |
1889 | |
1890 | wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (), |
1891 | dividend_blocks_needed, dividend_prec, sgn); |
1892 | wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (), |
1893 | divisor_blocks_needed, divisor_prec, sgn); |
1894 | |
1895 | m = dividend_blocks_needed; |
1896 | b_dividend[m] = 0; |
1897 | while (m > 1 && b_dividend[m - 1] == 0) |
1898 | m--; |
1899 | |
1900 | n = divisor_blocks_needed; |
1901 | while (n > 1 && b_divisor[n - 1] == 0) |
1902 | n--; |
1903 | |
1904 | memset (b_quotient, 0, sizeof (b_quotient)); |
1905 | |
1906 | divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n); |
1907 | |
1908 | unsigned int quotient_len = 0; |
1909 | if (quotient) |
1910 | { |
1911 | quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec); |
1912 | /* The quotient is neg if exactly one of the divisor or dividend is |
1913 | neg. */ |
1914 | if (dividend_neg != divisor_neg) |
1915 | quotient_len = wi::sub_large (quotient, zeros, 1, quotient, |
1916 | quotient_len, dividend_prec, |
1917 | UNSIGNED, 0); |
1918 | } |
1919 | |
1920 | if (remainder) |
1921 | { |
1922 | *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec); |
1923 | /* The remainder is always the same sign as the dividend. */ |
1924 | if (dividend_neg) |
1925 | *remainder_len = wi::sub_large (remainder, zeros, 1, remainder, |
1926 | *remainder_len, dividend_prec, |
1927 | UNSIGNED, 0); |
1928 | } |
1929 | |
1930 | return quotient_len; |
1931 | } |
1932 | |
1933 | /* |
1934 | * Shifting, rotating and extraction. |
1935 | */ |
1936 | |
1937 | /* Left shift XVAL by SHIFT and store the result in VAL. Return the |
1938 | number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */ |
1939 | unsigned int |
1940 | wi::lshift_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
1941 | unsigned int xlen, unsigned int precision, |
1942 | unsigned int shift) |
1943 | { |
1944 | /* Split the shift into a whole-block shift and a subblock shift. */ |
1945 | unsigned int skip = shift / HOST_BITS_PER_WIDE_INT64; |
1946 | unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT64; |
1947 | |
1948 | /* The whole-block shift fills with zeros. */ |
1949 | unsigned int len = BLOCKS_NEEDED (precision)(precision ? (((precision) + 64 - 1) / 64) : 1); |
1950 | for (unsigned int i = 0; i < skip; ++i) |
1951 | val[i] = 0; |
1952 | |
1953 | /* It's easier to handle the simple block case specially. */ |
1954 | if (small_shift == 0) |
1955 | for (unsigned int i = skip; i < len; ++i) |
1956 | val[i] = safe_uhwi (xval, xlen, i - skip); |
1957 | else |
1958 | { |
1959 | /* The first unfilled output block is a left shift of the first |
1960 | block in XVAL. The other output blocks contain bits from two |
1961 | consecutive input blocks. */ |
1962 | unsigned HOST_WIDE_INTlong carry = 0; |
1963 | for (unsigned int i = skip; i < len; ++i) |
1964 | { |
1965 | unsigned HOST_WIDE_INTlong x = safe_uhwi (xval, xlen, i - skip); |
1966 | val[i] = (x << small_shift) | carry; |
1967 | carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT64); |
1968 | } |
1969 | } |
1970 | return canonize (val, len, precision); |
1971 | } |
1972 | |
1973 | /* Right shift XVAL by SHIFT and store the result in VAL. Return the |
1974 | number of blocks in VAL. The input has XPRECISION bits and the |
1975 | output has XPRECISION - SHIFT bits. */ |
1976 | static unsigned int |
1977 | rshift_large_common (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
1978 | unsigned int xlen, unsigned int xprecision, |
1979 | unsigned int shift) |
1980 | { |
1981 | /* Split the shift into a whole-block shift and a subblock shift. */ |
1982 | unsigned int skip = shift / HOST_BITS_PER_WIDE_INT64; |
1983 | unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT64; |
1984 | |
1985 | /* Work out how many blocks are needed to store the significant bits |
1986 | (excluding the upper zeros or signs). */ |
1987 | unsigned int len = BLOCKS_NEEDED (xprecision - shift)(xprecision - shift ? (((xprecision - shift) + 64 - 1) / 64) : 1); |
1988 | |
1989 | /* It's easier to handle the simple block case specially. */ |
1990 | if (small_shift == 0) |
1991 | for (unsigned int i = 0; i < len; ++i) |
1992 | val[i] = safe_uhwi (xval, xlen, i + skip); |
1993 | else |
1994 | { |
1995 | /* Each output block but the last is a combination of two input blocks. |
1996 | The last block is a right shift of the last block in XVAL. */ |
1997 | unsigned HOST_WIDE_INTlong curr = safe_uhwi (xval, xlen, skip); |
1998 | for (unsigned int i = 0; i < len; ++i) |
1999 | { |
2000 | val[i] = curr >> small_shift; |
2001 | curr = safe_uhwi (xval, xlen, i + skip + 1); |
2002 | val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT64); |
2003 | } |
2004 | } |
2005 | return len; |
2006 | } |
2007 | |
2008 | /* Logically right shift XVAL by SHIFT and store the result in VAL. |
2009 | Return the number of blocks in VAL. XVAL has XPRECISION bits and |
2010 | VAL has PRECISION bits. */ |
2011 | unsigned int |
2012 | wi::lrshift_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
2013 | unsigned int xlen, unsigned int xprecision, |
2014 | unsigned int precision, unsigned int shift) |
2015 | { |
2016 | unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift); |
2017 | |
2018 | /* The value we just created has precision XPRECISION - SHIFT. |
2019 | Zero-extend it to wider precisions. */ |
2020 | if (precision > xprecision - shift) |
2021 | { |
2022 | unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT64; |
2023 | if (small_prec) |
2024 | val[len - 1] = zext_hwi (val[len - 1], small_prec); |
2025 | else if (val[len - 1] < 0) |
2026 | { |
2027 | /* Add a new block with a zero. */ |
2028 | val[len++] = 0; |
2029 | return len; |
2030 | } |
2031 | } |
2032 | return canonize (val, len, precision); |
2033 | } |
2034 | |
2035 | /* Arithmetically right shift XVAL by SHIFT and store the result in VAL. |
2036 | Return the number of blocks in VAL. XVAL has XPRECISION bits and |
2037 | VAL has PRECISION bits. */ |
2038 | unsigned int |
2039 | wi::arshift_large (HOST_WIDE_INTlong *val, const HOST_WIDE_INTlong *xval, |
2040 | unsigned int xlen, unsigned int xprecision, |
2041 | unsigned int precision, unsigned int shift) |
2042 | { |
2043 | unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift); |
2044 | |
2045 | /* The value we just created has precision XPRECISION - SHIFT. |
2046 | Sign-extend it to wider types. */ |
2047 | if (precision > xprecision - shift) |
2048 | { |
2049 | unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT64; |
2050 | if (small_prec) |
2051 | val[len - 1] = sext_hwi (val[len - 1], small_prec); |
2052 | } |
2053 | return canonize (val, len, precision); |
2054 | } |
2055 | |
2056 | /* Return the number of leading (upper) zeros in X. */ |
2057 | int |
2058 | wi::clz (const wide_int_ref &x) |
2059 | { |
2060 | if (x.sign_mask () < 0) |
2061 | /* The upper bit is set, so there are no leading zeros. */ |
2062 | return 0; |
2063 | |
2064 | /* Calculate how many bits there above the highest represented block. */ |
2065 | int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT64; |
2066 | |
2067 | unsigned HOST_WIDE_INTlong high = x.uhigh (); |
2068 | if (count < 0) |
2069 | /* The upper -COUNT bits of HIGH are not part of the value. |
2070 | Clear them out. */ |
2071 | high = (high << -count) >> -count; |
2072 | |
2073 | /* We don't need to look below HIGH. Either HIGH is nonzero, |
2074 | or the top bit of the block below is nonzero; clz_hwi is |
2075 | HOST_BITS_PER_WIDE_INT in the latter case. */ |
2076 | return count + clz_hwi (high); |
2077 | } |
2078 | |
2079 | /* Return the number of redundant sign bits in X. (That is, the number |
2080 | of bits immediately below the sign bit that have the same value as |
2081 | the sign bit.) */ |
2082 | int |
2083 | wi::clrsb (const wide_int_ref &x) |
2084 | { |
2085 | /* Calculate how many bits there above the highest represented block. */ |
2086 | int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT64; |
2087 | |
2088 | unsigned HOST_WIDE_INTlong high = x.uhigh (); |
2089 | unsigned HOST_WIDE_INTlong mask = -1; |
2090 | if (count < 0) |
2091 | { |
2092 | /* The upper -COUNT bits of HIGH are not part of the value. |
2093 | Clear them from both MASK and HIGH. */ |
2094 | mask >>= -count; |
2095 | high &= mask; |
2096 | } |
2097 | |
2098 | /* If the top bit is 1, count the number of leading 1s. If the top |
2099 | bit is zero, count the number of leading zeros. */ |
2100 | if (high > mask / 2) |
2101 | high ^= mask; |
2102 | |
2103 | /* There are no sign bits below the top block, so we don't need to look |
2104 | beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when |
2105 | HIGH is 0. */ |
2106 | return count + clz_hwi (high) - 1; |
2107 | } |
2108 | |
2109 | /* Return the number of trailing (lower) zeros in X. */ |
2110 | int |
2111 | wi::ctz (const wide_int_ref &x) |
2112 | { |
2113 | if (x.len == 1 && x.ulow () == 0) |
2114 | return x.precision; |
2115 | |
2116 | /* Having dealt with the zero case, there must be a block with a |
2117 | nonzero bit. We don't care about the bits above the first 1. */ |
2118 | unsigned int i = 0; |
2119 | while (x.val[i] == 0) |
2120 | ++i; |
2121 | return i * HOST_BITS_PER_WIDE_INT64 + ctz_hwi (x.val[i]); |
2122 | } |
2123 | |
2124 | /* If X is an exact power of 2, return the base-2 logarithm, otherwise |
2125 | return -1. */ |
2126 | int |
2127 | wi::exact_log2 (const wide_int_ref &x) |
2128 | { |
2129 | /* Reject cases where there are implicit -1 blocks above HIGH. */ |
2130 | if (x.len * HOST_BITS_PER_WIDE_INT64 < x.precision && x.sign_mask () < 0) |
2131 | return -1; |
2132 | |
2133 | /* Set CRUX to the index of the entry that should be nonzero. |
2134 | If the top block is zero then the next lowest block (if any) |
2135 | must have the high bit set. */ |
2136 | unsigned int crux = x.len - 1; |
2137 | if (crux > 0 && x.val[crux] == 0) |
2138 | crux -= 1; |
2139 | |
2140 | /* Check that all lower blocks are zero. */ |
2141 | for (unsigned int i = 0; i < crux; ++i) |
2142 | if (x.val[i] != 0) |
2143 | return -1; |
2144 | |
2145 | /* Get a zero-extended form of block CRUX. */ |
2146 | unsigned HOST_WIDE_INTlong hwi = x.val[crux]; |
2147 | if ((crux + 1) * HOST_BITS_PER_WIDE_INT64 > x.precision) |
2148 | hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT64); |
2149 | |
2150 | /* Now it's down to whether HWI is a power of 2. */ |
2151 | int res = ::exact_log2 (hwi); |
2152 | if (res >= 0) |
2153 | res += crux * HOST_BITS_PER_WIDE_INT64; |
2154 | return res; |
2155 | } |
2156 | |
2157 | /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */ |
2158 | int |
2159 | wi::floor_log2 (const wide_int_ref &x) |
2160 | { |
2161 | return x.precision - 1 - clz (x); |
2162 | } |
2163 | |
2164 | /* Return the index of the first (lowest) set bit in X, counting from 1. |
2165 | Return 0 if X is 0. */ |
2166 | int |
2167 | wi::ffs (const wide_int_ref &x) |
2168 | { |
2169 | return eq_p (x, 0) ? 0 : ctz (x) + 1; |
2170 | } |
2171 | |
2172 | /* Return true if sign-extending X to have precision PRECISION would give |
2173 | the minimum signed value at that precision. */ |
2174 | bool |
2175 | wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision) |
2176 | { |
2177 | return ctz (x) + 1 == int (precision); |
2178 | } |
2179 | |
2180 | /* Return true if X represents the minimum signed value. */ |
2181 | bool |
2182 | wi::only_sign_bit_p (const wide_int_ref &x) |
2183 | { |
2184 | return only_sign_bit_p (x, x.precision); |
2185 | } |
2186 | |
2187 | /* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL |
2188 | down to the previous value that has no bits set outside MASK. |
2189 | This rounding wraps for signed values if VAL is negative and |
2190 | the top bit of MASK is clear. |
2191 | |
2192 | For example, round_down_for_mask (6, 0xf1) would give 1 and |
2193 | round_down_for_mask (24, 0xf1) would give 17. */ |
2194 | |
2195 | wide_int |
2196 | wi::round_down_for_mask (const wide_int &val, const wide_int &mask) |
2197 | { |
2198 | /* Get the bits in VAL that are outside the mask. */ |
2199 | wide_int extra_bits = wi::bit_and_not (val, mask); |
2200 | if (extra_bits == 0) |
2201 | return val; |
2202 | |
2203 | /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s |
2204 | below that bit. */ |
2205 | unsigned int precision = val.get_precision (); |
2206 | wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits), |
2207 | false, precision); |
2208 | |
2209 | /* Clear the bits that aren't in MASK, but ensure that all bits |
2210 | in MASK below the top cleared bit are set. */ |
2211 | return (val & mask) | (mask & lower_mask); |
2212 | } |
2213 | |
2214 | /* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL |
2215 | up to the next value that has no bits set outside MASK. The rounding |
2216 | wraps if there are no suitable values greater than VAL. |
2217 | |
2218 | For example, round_up_for_mask (6, 0xf1) would give 16 and |
2219 | round_up_for_mask (24, 0xf1) would give 32. */ |
2220 | |
2221 | wide_int |
2222 | wi::round_up_for_mask (const wide_int &val, const wide_int &mask) |
2223 | { |
2224 | /* Get the bits in VAL that are outside the mask. */ |
2225 | wide_int extra_bits = wi::bit_and_not (val, mask); |
2226 | if (extra_bits == 0) |
2227 | return val; |
2228 | |
2229 | /* Get a mask that is all 1s above the top bit in EXTRA_BITS. */ |
2230 | unsigned int precision = val.get_precision (); |
2231 | wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits), |
2232 | true, precision); |
2233 | |
2234 | /* Get the bits of the mask that are above the top bit in EXTRA_BITS. */ |
2235 | upper_mask &= mask; |
2236 | |
2237 | /* Conceptually we need to: |
2238 | |
2239 | - clear bits of VAL outside UPPER_MASK |
2240 | - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0) |
2241 | - propagate the carry through the bits of VAL in UPPER_MASK |
2242 | |
2243 | If (~VAL & UPPER_MASK) is nonzero, the carry eventually |
2244 | reaches that bit and the process leaves all lower bits clear. |
2245 | If (~VAL & UPPER_MASK) is zero then the result is also zero. */ |
2246 | wide_int tmp = wi::bit_and_not (upper_mask, val); |
2247 | |
2248 | return (val | tmp) & -tmp; |
2249 | } |
2250 | |
2251 | /* Compute the modular multiplicative inverse of A modulo B |
2252 | using extended Euclid's algorithm. Assumes A and B are coprime, |
2253 | and that A and B have the same precision. */ |
2254 | wide_int |
2255 | wi::mod_inv (const wide_int &a, const wide_int &b) |
2256 | { |
2257 | /* Verify the assumption. */ |
2258 | gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1))((void)(!(wi::eq_p (wi::gcd (a, b), 1)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2258, __FUNCTION__), 0 : 0)); |
2259 | |
2260 | unsigned int p = a.get_precision () + 1; |
2261 | gcc_checking_assert (b.get_precision () + 1 == p)((void)(!(b.get_precision () + 1 == p) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2261, __FUNCTION__), 0 : 0)); |
2262 | wide_int c = wide_int::from (a, p, UNSIGNED); |
2263 | wide_int d = wide_int::from (b, p, UNSIGNED); |
2264 | wide_int x0 = wide_int::from (0, p, UNSIGNED); |
2265 | wide_int x1 = wide_int::from (1, p, UNSIGNED); |
2266 | |
2267 | if (wi::eq_p (b, 1)) |
2268 | return wide_int::from (1, p, UNSIGNED); |
2269 | |
2270 | while (wi::gt_p (c, 1, UNSIGNED)) |
2271 | { |
2272 | wide_int t = d; |
2273 | wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d); |
2274 | c = t; |
2275 | wide_int s = x0; |
2276 | x0 = wi::sub (x1, wi::mul (q, x0)); |
2277 | x1 = s; |
2278 | } |
2279 | if (wi::lt_p (x1, 0, SIGNED)) |
2280 | x1 += d; |
2281 | return x1; |
2282 | } |
2283 | |
2284 | /* |
2285 | * Private utilities. |
2286 | */ |
2287 | |
2288 | void gt_ggc_mx (widest_int *) { } |
2289 | void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { } |
2290 | void gt_pch_nx (widest_int *) { } |
2291 | |
2292 | template void wide_int::dump () const; |
2293 | template void generic_wide_int <wide_int_ref_storage <false> >::dump () const; |
2294 | template void generic_wide_int <wide_int_ref_storage <true> >::dump () const; |
2295 | template void offset_int::dump () const; |
2296 | template void widest_int::dump () const; |
2297 | |
2298 | /* We could add all the above ::dump variants here, but wide_int and |
2299 | widest_int should handle the common cases. Besides, you can always |
2300 | call the dump method directly. */ |
2301 | |
2302 | DEBUG_FUNCTION__attribute__ ((__used__)) void |
2303 | debug (const wide_int &ref) |
2304 | { |
2305 | ref.dump (); |
2306 | } |
2307 | |
2308 | DEBUG_FUNCTION__attribute__ ((__used__)) void |
2309 | debug (const wide_int *ptr) |
2310 | { |
2311 | if (ptr) |
2312 | debug (*ptr); |
2313 | else |
2314 | fprintf (stderrstderr, "<nil>\n"); |
2315 | } |
2316 | |
2317 | DEBUG_FUNCTION__attribute__ ((__used__)) void |
2318 | debug (const widest_int &ref) |
2319 | { |
2320 | ref.dump (); |
2321 | } |
2322 | |
2323 | DEBUG_FUNCTION__attribute__ ((__used__)) void |
2324 | debug (const widest_int *ptr) |
2325 | { |
2326 | if (ptr) |
2327 | debug (*ptr); |
2328 | else |
2329 | fprintf (stderrstderr, "<nil>\n"); |
2330 | } |
2331 | |
2332 | #if CHECKING_P1 |
2333 | |
2334 | namespace selftest { |
2335 | |
2336 | /* Selftests for wide ints. We run these multiple times, once per type. */ |
2337 | |
2338 | /* Helper function for building a test value. */ |
2339 | |
2340 | template <class VALUE_TYPE> |
2341 | static VALUE_TYPE |
2342 | from_int (int i); |
2343 | |
2344 | /* Specializations of the fixture for each wide-int type. */ |
2345 | |
2346 | /* Specialization for VALUE_TYPE == wide_int. */ |
2347 | |
2348 | template <> |
2349 | wide_int |
2350 | from_int (int i) |
2351 | { |
2352 | return wi::shwi (i, 32); |
2353 | } |
2354 | |
2355 | /* Specialization for VALUE_TYPE == offset_int. */ |
2356 | |
2357 | template <> |
2358 | offset_int |
2359 | from_int (int i) |
2360 | { |
2361 | return offset_int (i); |
2362 | } |
2363 | |
2364 | /* Specialization for VALUE_TYPE == widest_int. */ |
2365 | |
2366 | template <> |
2367 | widest_int |
2368 | from_int (int i) |
2369 | { |
2370 | return widest_int (i); |
2371 | } |
2372 | |
2373 | /* Verify that print_dec (WI, ..., SGN) gives the expected string |
2374 | representation (using base 10). */ |
2375 | |
2376 | static void |
2377 | assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn) |
2378 | { |
2379 | char buf[WIDE_INT_PRINT_BUFFER_SIZE(((((64*(8)) + 64) / 64) * 64) / 4 + 4)]; |
2380 | print_dec (wi, buf, sgn); |
2381 | ASSERT_STREQ (expected, buf)do { ::selftest::assert_streq ((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2381, __FUNCTION__)), "expected", "buf", (expected), (buf)) ; } while (0); |
2382 | } |
2383 | |
2384 | /* Likewise for base 16. */ |
2385 | |
2386 | static void |
2387 | assert_hexeq (const char *expected, const wide_int_ref &wi) |
2388 | { |
2389 | char buf[WIDE_INT_PRINT_BUFFER_SIZE(((((64*(8)) + 64) / 64) * 64) / 4 + 4)]; |
2390 | print_hex (wi, buf); |
2391 | ASSERT_STREQ (expected, buf)do { ::selftest::assert_streq ((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2391, __FUNCTION__)), "expected", "buf", (expected), (buf)) ; } while (0); |
2392 | } |
2393 | |
2394 | /* Test cases. */ |
2395 | |
2396 | /* Verify that print_dec and print_hex work for VALUE_TYPE. */ |
2397 | |
2398 | template <class VALUE_TYPE> |
2399 | static void |
2400 | test_printing () |
2401 | { |
2402 | VALUE_TYPE a = from_int<VALUE_TYPE> (42); |
2403 | assert_deceq ("42", a, SIGNED); |
2404 | assert_hexeq ("0x2a", a); |
2405 | assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69)); |
2406 | assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69)); |
2407 | assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false)); |
2408 | if (WIDE_INT_MAX_PRECISION((((64*(8)) + 64) / 64) * 64) > 128) |
2409 | { |
2410 | assert_hexeq ("0x20000000000000000fffffffffffffffe", |
2411 | wi::lshift (1, 129) + wi::lshift (1, 64) - 2); |
2412 | assert_hexeq ("0x200000000000004000123456789abcdef", |
2413 | wi::lshift (1, 129) + wi::lshift (1, 74) |
2414 | + wi::lshift (0x1234567, 32) + 0x89abcdef); |
2415 | } |
2416 | } |
2417 | |
2418 | /* Verify that various operations work correctly for VALUE_TYPE, |
2419 | unary and binary, using both function syntax, and |
2420 | overloaded-operators. */ |
2421 | |
2422 | template <class VALUE_TYPE> |
2423 | static void |
2424 | test_ops () |
2425 | { |
2426 | VALUE_TYPE a = from_int<VALUE_TYPE> (7); |
2427 | VALUE_TYPE b = from_int<VALUE_TYPE> (3); |
2428 | |
2429 | /* Using functions. */ |
2430 | assert_deceq ("-7", wi::neg (a), SIGNED); |
2431 | assert_deceq ("10", wi::add (a, b), SIGNED); |
2432 | assert_deceq ("4", wi::sub (a, b), SIGNED); |
2433 | assert_deceq ("-4", wi::sub (b, a), SIGNED); |
2434 | assert_deceq ("21", wi::mul (a, b), SIGNED); |
2435 | |
2436 | /* Using operators. */ |
2437 | assert_deceq ("-7", -a, SIGNED); |
2438 | assert_deceq ("10", a + b, SIGNED); |
2439 | assert_deceq ("4", a - b, SIGNED); |
2440 | assert_deceq ("-4", b - a, SIGNED); |
2441 | assert_deceq ("21", a * b, SIGNED); |
2442 | } |
2443 | |
2444 | /* Verify that various comparisons work correctly for VALUE_TYPE. */ |
2445 | |
2446 | template <class VALUE_TYPE> |
2447 | static void |
2448 | test_comparisons () |
2449 | { |
2450 | VALUE_TYPE a = from_int<VALUE_TYPE> (7); |
2451 | VALUE_TYPE b = from_int<VALUE_TYPE> (3); |
2452 | |
2453 | /* == */ |
2454 | ASSERT_TRUE (wi::eq_p (a, a))do { const char *desc_ = "ASSERT_TRUE (" "(wi::eq_p (a, a))" ")" ; bool actual_ = ((wi::eq_p (a, a))); if (actual_) ::selftest ::pass (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2454, __FUNCTION__))), desc_); else ::selftest::fail (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2454, __FUNCTION__))), desc_); } while (0); |
2455 | ASSERT_FALSE (wi::eq_p (a, b))do { const char *desc_ = "ASSERT_FALSE (" "(wi::eq_p (a, b))" ")"; bool actual_ = ((wi::eq_p (a, b))); if (actual_) ::selftest ::fail (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2455, __FUNCTION__))), desc_); else ::selftest::pass (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2455, __FUNCTION__))), desc_); } while (0); |
2456 | |
2457 | /* != */ |
2458 | ASSERT_TRUE (wi::ne_p (a, b))do { const char *desc_ = "ASSERT_TRUE (" "(wi::ne_p (a, b))" ")" ; bool actual_ = ((wi::ne_p (a, b))); if (actual_) ::selftest ::pass (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2458, __FUNCTION__))), desc_); else ::selftest::fail (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2458, __FUNCTION__))), desc_); } while (0); |
2459 | ASSERT_FALSE (wi::ne_p (a, a))do { const char *desc_ = "ASSERT_FALSE (" "(wi::ne_p (a, a))" ")"; bool actual_ = ((wi::ne_p (a, a))); if (actual_) ::selftest ::fail (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2459, __FUNCTION__))), desc_); else ::selftest::pass (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2459, __FUNCTION__))), desc_); } while (0); |
2460 | |
2461 | /* < */ |
2462 | ASSERT_FALSE (wi::lts_p (a, a))do { const char *desc_ = "ASSERT_FALSE (" "(wi::lts_p (a, a))" ")"; bool actual_ = ((wi::lts_p (a, a))); if (actual_) ::selftest ::fail (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2462, __FUNCTION__))), desc_); else ::selftest::pass (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2462, __FUNCTION__))), desc_); } while (0); |
2463 | ASSERT_FALSE (wi::lts_p (a, b))do { const char *desc_ = "ASSERT_FALSE (" "(wi::lts_p (a, b))" ")"; bool actual_ = ((wi::lts_p (a, b))); if (actual_) ::selftest ::fail (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2463, __FUNCTION__))), desc_); else ::selftest::pass (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2463, __FUNCTION__))), desc_); } while (0); |
2464 | ASSERT_TRUE (wi::lts_p (b, a))do { const char *desc_ = "ASSERT_TRUE (" "(wi::lts_p (b, a))" ")"; bool actual_ = ((wi::lts_p (b, a))); if (actual_) ::selftest ::pass (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2464, __FUNCTION__))), desc_); else ::selftest::fail (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2464, __FUNCTION__))), desc_); } while (0); |
2465 | |
2466 | /* <= */ |
2467 | ASSERT_TRUE (wi::les_p (a, a))do { const char *desc_ = "ASSERT_TRUE (" "(wi::les_p (a, a))" ")"; bool actual_ = ((wi::les_p (a, a))); if (actual_) ::selftest ::pass (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2467, __FUNCTION__))), desc_); else ::selftest::fail (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2467, __FUNCTION__))), desc_); } while (0); |
2468 | ASSERT_FALSE (wi::les_p (a, b))do { const char *desc_ = "ASSERT_FALSE (" "(wi::les_p (a, b))" ")"; bool actual_ = ((wi::les_p (a, b))); if (actual_) ::selftest ::fail (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2468, __FUNCTION__))), desc_); else ::selftest::pass (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2468, __FUNCTION__))), desc_); } while (0); |
2469 | ASSERT_TRUE (wi::les_p (b, a))do { const char *desc_ = "ASSERT_TRUE (" "(wi::les_p (b, a))" ")"; bool actual_ = ((wi::les_p (b, a))); if (actual_) ::selftest ::pass (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2469, __FUNCTION__))), desc_); else ::selftest::fail (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2469, __FUNCTION__))), desc_); } while (0); |
2470 | |
2471 | /* > */ |
2472 | ASSERT_FALSE (wi::gts_p (a, a))do { const char *desc_ = "ASSERT_FALSE (" "(wi::gts_p (a, a))" ")"; bool actual_ = ((wi::gts_p (a, a))); if (actual_) ::selftest ::fail (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2472, __FUNCTION__))), desc_); else ::selftest::pass (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2472, __FUNCTION__))), desc_); } while (0); |
2473 | ASSERT_TRUE (wi::gts_p (a, b))do { const char *desc_ = "ASSERT_TRUE (" "(wi::gts_p (a, b))" ")"; bool actual_ = ((wi::gts_p (a, b))); if (actual_) ::selftest ::pass (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2473, __FUNCTION__))), desc_); else ::selftest::fail (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2473, __FUNCTION__))), desc_); } while (0); |
2474 | ASSERT_FALSE (wi::gts_p (b, a))do { const char *desc_ = "ASSERT_FALSE (" "(wi::gts_p (b, a))" ")"; bool actual_ = ((wi::gts_p (b, a))); if (actual_) ::selftest ::fail (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2474, __FUNCTION__))), desc_); else ::selftest::pass (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2474, __FUNCTION__))), desc_); } while (0); |
2475 | |
2476 | /* >= */ |
2477 | ASSERT_TRUE (wi::ges_p (a, a))do { const char *desc_ = "ASSERT_TRUE (" "(wi::ges_p (a, a))" ")"; bool actual_ = ((wi::ges_p (a, a))); if (actual_) ::selftest ::pass (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2477, __FUNCTION__))), desc_); else ::selftest::fail (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2477, __FUNCTION__))), desc_); } while (0); |
2478 | ASSERT_TRUE (wi::ges_p (a, b))do { const char *desc_ = "ASSERT_TRUE (" "(wi::ges_p (a, b))" ")"; bool actual_ = ((wi::ges_p (a, b))); if (actual_) ::selftest ::pass (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2478, __FUNCTION__))), desc_); else ::selftest::fail (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2478, __FUNCTION__))), desc_); } while (0); |
2479 | ASSERT_FALSE (wi::ges_p (b, a))do { const char *desc_ = "ASSERT_FALSE (" "(wi::ges_p (b, a))" ")"; bool actual_ = ((wi::ges_p (b, a))); if (actual_) ::selftest ::fail (((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2479, __FUNCTION__))), desc_); else ::selftest::pass (((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2479, __FUNCTION__))), desc_); } while (0); |
2480 | |
2481 | /* comparison */ |
2482 | ASSERT_EQ (-1, wi::cmps (b, a))do { const char *desc_ = "ASSERT_EQ (" "(-1)" ", " "(wi::cmps (b, a))" ")"; if (((-1)) == ((wi::cmps (b, a)))) ::selftest::pass ((( (::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2482, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2482, __FUNCTION__)))), desc_); } while (0); |
2483 | ASSERT_EQ (0, wi::cmps (a, a))do { const char *desc_ = "ASSERT_EQ (" "(0)" ", " "(wi::cmps (a, a))" ")"; if (((0)) == ((wi::cmps (a, a)))) ::selftest::pass (((( ::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2483, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2483, __FUNCTION__)))), desc_); } while (0); |
2484 | ASSERT_EQ (1, wi::cmps (a, b))do { const char *desc_ = "ASSERT_EQ (" "(1)" ", " "(wi::cmps (a, b))" ")"; if (((1)) == ((wi::cmps (a, b)))) ::selftest::pass (((( ::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2484, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2484, __FUNCTION__)))), desc_); } while (0); |
2485 | } |
2486 | |
2487 | /* Run all of the selftests, using the given VALUE_TYPE. */ |
2488 | |
2489 | template <class VALUE_TYPE> |
2490 | static void run_all_wide_int_tests () |
2491 | { |
2492 | test_printing <VALUE_TYPE> (); |
2493 | test_ops <VALUE_TYPE> (); |
2494 | test_comparisons <VALUE_TYPE> (); |
2495 | } |
2496 | |
2497 | /* Test overflow conditions. */ |
2498 | |
2499 | static void |
2500 | test_overflow () |
2501 | { |
2502 | static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 }; |
2503 | static int offsets[] = { 16, 1, 0 }; |
2504 | for (unsigned int i = 0; i < ARRAY_SIZE (precs)(sizeof (precs) / sizeof ((precs)[0])); ++i) |
2505 | for (unsigned int j = 0; j < ARRAY_SIZE (offsets)(sizeof (offsets) / sizeof ((offsets)[0])); ++j) |
2506 | { |
2507 | int prec = precs[i]; |
2508 | int offset = offsets[j]; |
2509 | wi::overflow_type overflow; |
2510 | wide_int sum, diff; |
2511 | |
2512 | sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1, |
2513 | UNSIGNED, &overflow); |
2514 | ASSERT_EQ (sum, -offset)do { const char *desc_ = "ASSERT_EQ (" "(sum)" ", " "(-offset)" ")"; if (((sum)) == ((-offset))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2514, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2514, __FUNCTION__)))), desc_); } while (0); |
2515 | ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0)do { const char *desc_ = "ASSERT_EQ (" "(overflow != wi::OVF_NONE)" ", " "(offset == 0)" ")"; if (((overflow != wi::OVF_NONE)) == ((offset == 0))) ::selftest::pass ((((::selftest::location ( "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2515, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2515, __FUNCTION__)))), desc_); } while (0); |
2516 | |
2517 | sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset, |
2518 | UNSIGNED, &overflow); |
2519 | ASSERT_EQ (sum, -offset)do { const char *desc_ = "ASSERT_EQ (" "(sum)" ", " "(-offset)" ")"; if (((sum)) == ((-offset))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2519, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2519, __FUNCTION__)))), desc_); } while (0); |
2520 | ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0)do { const char *desc_ = "ASSERT_EQ (" "(overflow != wi::OVF_NONE)" ", " "(offset == 0)" ")"; if (((overflow != wi::OVF_NONE)) == ((offset == 0))) ::selftest::pass ((((::selftest::location ( "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2520, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2520, __FUNCTION__)))), desc_); } while (0); |
2521 | |
2522 | diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset, |
2523 | wi::max_value (prec, UNSIGNED), |
2524 | UNSIGNED, &overflow); |
2525 | ASSERT_EQ (diff, -offset)do { const char *desc_ = "ASSERT_EQ (" "(diff)" ", " "(-offset)" ")"; if (((diff)) == ((-offset))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2525, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2525, __FUNCTION__)))), desc_); } while (0); |
2526 | ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0)do { const char *desc_ = "ASSERT_EQ (" "(overflow != wi::OVF_NONE)" ", " "(offset != 0)" ")"; if (((overflow != wi::OVF_NONE)) == ((offset != 0))) ::selftest::pass ((((::selftest::location ( "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2526, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2526, __FUNCTION__)))), desc_); } while (0); |
2527 | |
2528 | diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset, |
2529 | wi::max_value (prec, UNSIGNED) - 1, |
2530 | UNSIGNED, &overflow); |
2531 | ASSERT_EQ (diff, 1 - offset)do { const char *desc_ = "ASSERT_EQ (" "(diff)" ", " "(1 - offset)" ")"; if (((diff)) == ((1 - offset))) ::selftest::pass ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2531, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2531, __FUNCTION__)))), desc_); } while (0); |
2532 | ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1)do { const char *desc_ = "ASSERT_EQ (" "(overflow != wi::OVF_NONE)" ", " "(offset > 1)" ")"; if (((overflow != wi::OVF_NONE)) == ((offset > 1))) ::selftest::pass ((((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2532, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2532, __FUNCTION__)))), desc_); } while (0); |
2533 | } |
2534 | } |
2535 | |
2536 | /* Test the round_{down,up}_for_mask functions. */ |
2537 | |
2538 | static void |
2539 | test_round_for_mask () |
2540 | { |
2541 | unsigned int prec = 18; |
2542 | ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),do { const char *desc_ = "ASSERT_EQ (" "(17)" ", " "(wi::round_down_for_mask (wi::shwi (17, prec), wi::shwi (0xf1, prec)))" ")"; if (((17)) == ((wi::round_down_for_mask (wi::shwi (17, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2543, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2543, __FUNCTION__)))), desc_); } while (0) |
2543 | wi::shwi (0xf1, prec)))do { const char *desc_ = "ASSERT_EQ (" "(17)" ", " "(wi::round_down_for_mask (wi::shwi (17, prec), wi::shwi (0xf1, prec)))" ")"; if (((17)) == ((wi::round_down_for_mask (wi::shwi (17, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2543, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2543, __FUNCTION__)))), desc_); } while (0); |
2544 | ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),do { const char *desc_ = "ASSERT_EQ (" "(17)" ", " "(wi::round_up_for_mask (wi::shwi (17, prec), wi::shwi (0xf1, prec)))" ")"; if (((17)) == ((wi::round_up_for_mask (wi::shwi (17, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2545, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2545, __FUNCTION__)))), desc_); } while (0) |
2545 | wi::shwi (0xf1, prec)))do { const char *desc_ = "ASSERT_EQ (" "(17)" ", " "(wi::round_up_for_mask (wi::shwi (17, prec), wi::shwi (0xf1, prec)))" ")"; if (((17)) == ((wi::round_up_for_mask (wi::shwi (17, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2545, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2545, __FUNCTION__)))), desc_); } while (0); |
2546 | |
2547 | ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),do { const char *desc_ = "ASSERT_EQ (" "(1)" ", " "(wi::round_down_for_mask (wi::shwi (6, prec), wi::shwi (0xf1, prec)))" ")"; if (((1)) == ((wi::round_down_for_mask (wi::shwi (6, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2548, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2548, __FUNCTION__)))), desc_); } while (0) |
2548 | wi::shwi (0xf1, prec)))do { const char *desc_ = "ASSERT_EQ (" "(1)" ", " "(wi::round_down_for_mask (wi::shwi (6, prec), wi::shwi (0xf1, prec)))" ")"; if (((1)) == ((wi::round_down_for_mask (wi::shwi (6, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2548, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2548, __FUNCTION__)))), desc_); } while (0); |
2549 | ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),do { const char *desc_ = "ASSERT_EQ (" "(16)" ", " "(wi::round_up_for_mask (wi::shwi (6, prec), wi::shwi (0xf1, prec)))" ")"; if (((16)) == ((wi::round_up_for_mask (wi::shwi (6, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2550, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2550, __FUNCTION__)))), desc_); } while (0) |
2550 | wi::shwi (0xf1, prec)))do { const char *desc_ = "ASSERT_EQ (" "(16)" ", " "(wi::round_up_for_mask (wi::shwi (6, prec), wi::shwi (0xf1, prec)))" ")"; if (((16)) == ((wi::round_up_for_mask (wi::shwi (6, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2550, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2550, __FUNCTION__)))), desc_); } while (0); |
2551 | |
2552 | ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),do { const char *desc_ = "ASSERT_EQ (" "(17)" ", " "(wi::round_down_for_mask (wi::shwi (24, prec), wi::shwi (0xf1, prec)))" ")"; if (((17)) == ((wi::round_down_for_mask (wi::shwi (24, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2553, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2553, __FUNCTION__)))), desc_); } while (0) |
2553 | wi::shwi (0xf1, prec)))do { const char *desc_ = "ASSERT_EQ (" "(17)" ", " "(wi::round_down_for_mask (wi::shwi (24, prec), wi::shwi (0xf1, prec)))" ")"; if (((17)) == ((wi::round_down_for_mask (wi::shwi (24, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2553, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2553, __FUNCTION__)))), desc_); } while (0); |
2554 | ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),do { const char *desc_ = "ASSERT_EQ (" "(32)" ", " "(wi::round_up_for_mask (wi::shwi (24, prec), wi::shwi (0xf1, prec)))" ")"; if (((32)) == ((wi::round_up_for_mask (wi::shwi (24, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2555, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2555, __FUNCTION__)))), desc_); } while (0) |
2555 | wi::shwi (0xf1, prec)))do { const char *desc_ = "ASSERT_EQ (" "(32)" ", " "(wi::round_up_for_mask (wi::shwi (24, prec), wi::shwi (0xf1, prec)))" ")"; if (((32)) == ((wi::round_up_for_mask (wi::shwi (24, prec ), wi::shwi (0xf1, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2555, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2555, __FUNCTION__)))), desc_); } while (0); |
2556 | |
2557 | ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),do { const char *desc_ = "ASSERT_EQ (" "(0x011)" ", " "(wi::round_down_for_mask (wi::shwi (0x22, prec), wi::shwi (0x111, prec)))" ")"; if (((0x011)) == ((wi::round_down_for_mask (wi::shwi (0x22 , prec), wi::shwi (0x111, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2558, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2558, __FUNCTION__)))), desc_); } while (0) |
2558 | wi::shwi (0x111, prec)))do { const char *desc_ = "ASSERT_EQ (" "(0x011)" ", " "(wi::round_down_for_mask (wi::shwi (0x22, prec), wi::shwi (0x111, prec)))" ")"; if (((0x011)) == ((wi::round_down_for_mask (wi::shwi (0x22 , prec), wi::shwi (0x111, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2558, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2558, __FUNCTION__)))), desc_); } while (0); |
2559 | ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),do { const char *desc_ = "ASSERT_EQ (" "(0x100)" ", " "(wi::round_up_for_mask (wi::shwi (0x22, prec), wi::shwi (0x111, prec)))" ")"; if (((0x100)) == ((wi::round_up_for_mask (wi::shwi (0x22 , prec), wi::shwi (0x111, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2560, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2560, __FUNCTION__)))), desc_); } while (0) |
2560 | wi::shwi (0x111, prec)))do { const char *desc_ = "ASSERT_EQ (" "(0x100)" ", " "(wi::round_up_for_mask (wi::shwi (0x22, prec), wi::shwi (0x111, prec)))" ")"; if (((0x100)) == ((wi::round_up_for_mask (wi::shwi (0x22 , prec), wi::shwi (0x111, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2560, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2560, __FUNCTION__)))), desc_); } while (0); |
2561 | |
2562 | ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),do { const char *desc_ = "ASSERT_EQ (" "(100)" ", " "(wi::round_down_for_mask (wi::shwi (101, prec), wi::shwi (0xfc, prec)))" ")"; if (((100)) == ((wi::round_down_for_mask (wi::shwi (101 , prec), wi::shwi (0xfc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2563, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2563, __FUNCTION__)))), desc_); } while (0) |
2563 | wi::shwi (0xfc, prec)))do { const char *desc_ = "ASSERT_EQ (" "(100)" ", " "(wi::round_down_for_mask (wi::shwi (101, prec), wi::shwi (0xfc, prec)))" ")"; if (((100)) == ((wi::round_down_for_mask (wi::shwi (101 , prec), wi::shwi (0xfc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2563, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2563, __FUNCTION__)))), desc_); } while (0); |
2564 | ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),do { const char *desc_ = "ASSERT_EQ (" "(104)" ", " "(wi::round_up_for_mask (wi::shwi (101, prec), wi::shwi (0xfc, prec)))" ")"; if (((104)) == ((wi::round_up_for_mask (wi::shwi (101, prec ), wi::shwi (0xfc, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2565, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2565, __FUNCTION__)))), desc_); } while (0) |
2565 | wi::shwi (0xfc, prec)))do { const char *desc_ = "ASSERT_EQ (" "(104)" ", " "(wi::round_up_for_mask (wi::shwi (101, prec), wi::shwi (0xfc, prec)))" ")"; if (((104)) == ((wi::round_up_for_mask (wi::shwi (101, prec ), wi::shwi (0xfc, prec))))) ::selftest::pass ((((::selftest:: location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2565, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2565, __FUNCTION__)))), desc_); } while (0); |
2566 | |
2567 | ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),do { const char *desc_ = "ASSERT_EQ (" "(0x2bc)" ", " "(wi::round_down_for_mask (wi::shwi (0x2c2, prec), wi::shwi (0xabc, prec)))" ")"; if (((0x2bc)) == ((wi::round_down_for_mask (wi::shwi (0x2c2 , prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2568, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2568, __FUNCTION__)))), desc_); } while (0) |
2568 | wi::shwi (0xabc, prec)))do { const char *desc_ = "ASSERT_EQ (" "(0x2bc)" ", " "(wi::round_down_for_mask (wi::shwi (0x2c2, prec), wi::shwi (0xabc, prec)))" ")"; if (((0x2bc)) == ((wi::round_down_for_mask (wi::shwi (0x2c2 , prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2568, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2568, __FUNCTION__)))), desc_); } while (0); |
2569 | ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),do { const char *desc_ = "ASSERT_EQ (" "(0x800)" ", " "(wi::round_up_for_mask (wi::shwi (0x2c2, prec), wi::shwi (0xabc, prec)))" ")"; if (((0x800)) == ((wi::round_up_for_mask (wi::shwi (0x2c2 , prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2570, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2570, __FUNCTION__)))), desc_); } while (0) |
2570 | wi::shwi (0xabc, prec)))do { const char *desc_ = "ASSERT_EQ (" "(0x800)" ", " "(wi::round_up_for_mask (wi::shwi (0x2c2, prec), wi::shwi (0xabc, prec)))" ")"; if (((0x800)) == ((wi::round_up_for_mask (wi::shwi (0x2c2 , prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2570, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2570, __FUNCTION__)))), desc_); } while (0); |
2571 | |
2572 | ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),do { const char *desc_ = "ASSERT_EQ (" "(0xabc)" ", " "(wi::round_down_for_mask (wi::shwi (0xabd, prec), wi::shwi (0xabc, prec)))" ")"; if (((0xabc)) == ((wi::round_down_for_mask (wi::shwi (0xabd , prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2573, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2573, __FUNCTION__)))), desc_); } while (0) |
2573 | wi::shwi (0xabc, prec)))do { const char *desc_ = "ASSERT_EQ (" "(0xabc)" ", " "(wi::round_down_for_mask (wi::shwi (0xabd, prec), wi::shwi (0xabc, prec)))" ")"; if (((0xabc)) == ((wi::round_down_for_mask (wi::shwi (0xabd , prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2573, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2573, __FUNCTION__)))), desc_); } while (0); |
2574 | ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),do { const char *desc_ = "ASSERT_EQ (" "(0)" ", " "(wi::round_up_for_mask (wi::shwi (0xabd, prec), wi::shwi (0xabc, prec)))" ")"; if (((0)) == ((wi::round_up_for_mask (wi::shwi (0xabd, prec ), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2575, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2575, __FUNCTION__)))), desc_); } while (0) |
2575 | wi::shwi (0xabc, prec)))do { const char *desc_ = "ASSERT_EQ (" "(0)" ", " "(wi::round_up_for_mask (wi::shwi (0xabd, prec), wi::shwi (0xabc, prec)))" ")"; if (((0)) == ((wi::round_up_for_mask (wi::shwi (0xabd, prec ), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2575, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2575, __FUNCTION__)))), desc_); } while (0); |
2576 | |
2577 | ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),do { const char *desc_ = "ASSERT_EQ (" "(0xabc)" ", " "(wi::round_down_for_mask (wi::shwi (0x1000, prec), wi::shwi (0xabc, prec)))" ")"; if (((0xabc)) == ((wi::round_down_for_mask (wi::shwi (0x1000 , prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2578, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2578, __FUNCTION__)))), desc_); } while (0) |
2578 | wi::shwi (0xabc, prec)))do { const char *desc_ = "ASSERT_EQ (" "(0xabc)" ", " "(wi::round_down_for_mask (wi::shwi (0x1000, prec), wi::shwi (0xabc, prec)))" ")"; if (((0xabc)) == ((wi::round_down_for_mask (wi::shwi (0x1000 , prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2578, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2578, __FUNCTION__)))), desc_); } while (0); |
2579 | ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),do { const char *desc_ = "ASSERT_EQ (" "(0)" ", " "(wi::round_up_for_mask (wi::shwi (0x1000, prec), wi::shwi (0xabc, prec)))" ")"; if (((0)) == ((wi::round_up_for_mask (wi::shwi (0x1000, prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2580, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2580, __FUNCTION__)))), desc_); } while (0) |
2580 | wi::shwi (0xabc, prec)))do { const char *desc_ = "ASSERT_EQ (" "(0)" ", " "(wi::round_up_for_mask (wi::shwi (0x1000, prec), wi::shwi (0xabc, prec)))" ")"; if (((0)) == ((wi::round_up_for_mask (wi::shwi (0x1000, prec), wi::shwi (0xabc, prec))))) ::selftest::pass ((((::selftest ::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2580, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2580, __FUNCTION__)))), desc_); } while (0); |
2581 | } |
2582 | |
2583 | /* Run all of the selftests within this file, for all value types. */ |
2584 | |
2585 | void |
2586 | wide_int_cc_tests () |
2587 | { |
2588 | run_all_wide_int_tests <wide_int> (); |
2589 | run_all_wide_int_tests <offset_int> (); |
2590 | run_all_wide_int_tests <widest_int> (); |
2591 | test_overflow (); |
2592 | test_round_for_mask (); |
2593 | ASSERT_EQ (wi::mask (128, false, 128),do { const char *desc_ = "ASSERT_EQ (" "(wi::mask (128, false, 128))" ", " "(wi::shifted_mask (0, 128, false, 128))" ")"; if (((wi ::mask (128, false, 128))) == ((wi::shifted_mask (0, 128, false , 128)))) ::selftest::pass ((((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2594, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2594, __FUNCTION__)))), desc_); } while (0) |
2594 | wi::shifted_mask (0, 128, false, 128))do { const char *desc_ = "ASSERT_EQ (" "(wi::mask (128, false, 128))" ", " "(wi::shifted_mask (0, 128, false, 128))" ")"; if (((wi ::mask (128, false, 128))) == ((wi::shifted_mask (0, 128, false , 128)))) ::selftest::pass ((((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2594, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2594, __FUNCTION__)))), desc_); } while (0); |
2595 | ASSERT_EQ (wi::mask (128, true, 128),do { const char *desc_ = "ASSERT_EQ (" "(wi::mask (128, true, 128))" ", " "(wi::shifted_mask (0, 128, true, 128))" ")"; if (((wi:: mask (128, true, 128))) == ((wi::shifted_mask (0, 128, true, 128 )))) ::selftest::pass ((((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2596, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2596, __FUNCTION__)))), desc_); } while (0) |
2596 | wi::shifted_mask (0, 128, true, 128))do { const char *desc_ = "ASSERT_EQ (" "(wi::mask (128, true, 128))" ", " "(wi::shifted_mask (0, 128, true, 128))" ")"; if (((wi:: mask (128, true, 128))) == ((wi::shifted_mask (0, 128, true, 128 )))) ::selftest::pass ((((::selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2596, __FUNCTION__)))), desc_); else ::selftest::fail ((((:: selftest::location ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/wide-int.cc" , 2596, __FUNCTION__)))), desc_); } while (0); |
2597 | } |
2598 | |
2599 | } // namespace selftest |
2600 | #endif /* CHECKING_P */ |