File: | build/gcc/ggc-page.cc |
Warning: | line 1339, column 11: Although the value stored to 'bit' is used in the enclosing expression, the value is never actually read from 'bit' |
1 | /* "Bag-of-pages" garbage collector for the GNU compiler. |
2 | Copyright (C) 1999-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | #include "config.h" |
21 | #include "system.h" |
22 | #include "coretypes.h" |
23 | #include "backend.h" |
24 | #include "alias.h" |
25 | #include "tree.h" |
26 | #include "rtl.h" |
27 | #include "memmodel.h" |
28 | #include "tm_p.h" |
29 | #include "diagnostic-core.h" |
30 | #include "flags.h" |
31 | #include "ggc-internal.h" |
32 | #include "timevar.h" |
33 | #include "cgraph.h" |
34 | #include "cfgloop.h" |
35 | #include "plugin.h" |
36 | |
37 | /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a |
38 | file open. Prefer either to valloc. */ |
39 | #ifdef HAVE_MMAP_ANON |
40 | # undef HAVE_MMAP_DEV_ZERO |
41 | # define USING_MMAP |
42 | #endif |
43 | |
44 | #ifdef HAVE_MMAP_DEV_ZERO |
45 | # define USING_MMAP |
46 | #endif |
47 | |
48 | #ifndef USING_MMAP |
49 | #define USING_MALLOC_PAGE_GROUPS |
50 | #endif |
51 | |
52 | #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \ |
53 | && defined(USING_MMAP) |
54 | # define USING_MADVISE |
55 | #endif |
56 | |
57 | /* Strategy: |
58 | |
59 | This garbage-collecting allocator allocates objects on one of a set |
60 | of pages. Each page can allocate objects of a single size only; |
61 | available sizes are powers of two starting at four bytes. The size |
62 | of an allocation request is rounded up to the next power of two |
63 | (`order'), and satisfied from the appropriate page. |
64 | |
65 | Each page is recorded in a page-entry, which also maintains an |
66 | in-use bitmap of object positions on the page. This allows the |
67 | allocation state of a particular object to be flipped without |
68 | touching the page itself. |
69 | |
70 | Each page-entry also has a context depth, which is used to track |
71 | pushing and popping of allocation contexts. Only objects allocated |
72 | in the current (highest-numbered) context may be collected. |
73 | |
74 | Page entries are arranged in an array of singly-linked lists. The |
75 | array is indexed by the allocation size, in bits, of the pages on |
76 | it; i.e. all pages on a list allocate objects of the same size. |
77 | Pages are ordered on the list such that all non-full pages precede |
78 | all full pages, with non-full pages arranged in order of decreasing |
79 | context depth. |
80 | |
81 | Empty pages (of all orders) are kept on a single page cache list, |
82 | and are considered first when new pages are required; they are |
83 | deallocated at the start of the next collection if they haven't |
84 | been recycled by then. */ |
85 | |
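As a concrete illustration of the size-to-order rounding described in the strategy comment, here is a minimal standalone sketch (a hypothetical helper, not code from this file; the allocator itself uses the size_lookup and object_size_table tables, and extra_order_size_table below adds non-power-of-two buckets on top of this):

    /* Round SIZE up to the next power of two and return its log2,
       i.e. the allocation `order'.  */
    static unsigned
    example_size_to_order (size_t size)
    {
      unsigned order = 2;  /* Smallest object is 2^2 == 4 bytes.  */
      while (((size_t) 1 << order) < size)
        ++order;
      return order;  /* e.g. example_size_to_order (20) == 5, a 32-byte object.  */
    }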
86 | /* Define GGC_DEBUG_LEVEL to print debugging information. |
87 | 0: No debugging output. |
88 | 1: GC statistics only. |
89 | 2: Page-entry allocations/deallocations as well. |
90 | 3: Object allocations as well. |
91 | 4: Object marks as well. */ |
92 | #define GGC_DEBUG_LEVEL (0) |
93 | |
94 | /* A two-level tree is used to look up the page-entry for a given |
95 | pointer. Two chunks of the pointer's bits are extracted to index |
96 | the first and second levels of the tree, as follows: |
97 | |
98 |                          HOST_PAGE_SIZE_BITS |
99 |                15              |      | |
100 | msb +----------------+----+------+------+ lsb |
101 |                      |    |      | |
102 |                  PAGE_L1_BITS    | |
103 |                           |      | |
104 |                        PAGE_L2_BITS |
105 | |
106 | The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry |
107 | pages are aligned on system page boundaries. The next most |
108 | significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first |
109 | index values in the lookup table, respectively. |
110 | |
111 | For 32-bit architectures and the settings below, there are no |
112 | leftover bits. For architectures with wider pointers, the lookup |
113 | tree points to a list of pages, which must be scanned to find the |
114 | correct one. */ |
115 | |
116 | #define PAGE_L1_BITS (8) |
117 | #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize) |
118 | #define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS) |
119 | #define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS) |
120 | |
121 | #define LOOKUP_L1(p) \ |
122 | (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1)) |
123 | |
124 | #define LOOKUP_L2(p) \ |
125 | (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1)) |
126 | |
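Worked example (assuming a 32-bit host with 4 KB pages, so G.lg_pagesize == 12 and PAGE_L2_BITS == 32 - 8 - 12 == 12): for the pointer 0xABCD1234, LOOKUP_L1 yields 0xAB (the top 8 bits) and LOOKUP_L2 yields 0xCD1 (the next 12 bits); the low 12 bits, 0x234, are the object's offset within its page and are deliberately ignored by the lookup.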
127 | /* The number of objects per allocation page, for objects on a page of |
128 | the indicated ORDER. */ |
129 | #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER] |
130 | |
131 | /* The number of objects in P. */ |
132 | #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order)) |
133 | |
134 | /* The size of an object on a page of the indicated ORDER. */ |
135 | #define OBJECT_SIZE(ORDER) object_size_table[ORDER] |
136 | |
137 | /* For speed, we avoid doing a general integer divide to locate the |
138 | offset in the allocation bitmap, by precalculating numbers M, S |
139 | such that (O * M) >> S == O / Z (modulo 2^32), for any offset O |
140 | within the page which is evenly divisible by the object size Z. */ |
141 | #define DIV_MULT(ORDER) inverse_table[ORDER].mult |
142 | #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift |
143 | #define OFFSET_TO_BIT(OFFSET, ORDER) \ |
144 | (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER)) |
145 | |
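Worked example of the (M, S) pair (one valid choice, as computed by compute_inverse below): for object size Z == 24 == 8 * 3, take S == 3 and M == 0xAAAAAAAB, the multiplicative inverse of 3 modulo 2^32, since 3 * 0xAAAAAAAB == 0x200000001 == 1 (mod 2^32). Then for the offset O == 48: (48 * 0xAAAAAAAB) mod 2^32 == 0x10, and 0x10 >> 3 == 2 == 48 / 24, as required.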
146 | /* We use this structure to determine the alignment required for |
147 | allocations. For power-of-two sized allocations, that's not a |
148 | problem, but it does matter for odd-sized allocations. |
149 | We do not care about alignment for floating-point types. */ |
150 | |
151 | struct max_alignment { |
152 | char c; |
153 | union { |
154 | int64_t i; |
155 | void *p; |
156 | } u; |
157 | }; |
158 | |
159 | /* The biggest alignment required. */ |
160 | |
161 | #define MAX_ALIGNMENT (offsetof (struct max_alignment, u)) |
162 | |
163 | |
164 | /* The number of extra orders, not corresponding to power-of-two sized |
165 | objects. */ |
166 | |
167 | #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table) |
168 | |
169 | #define RTL_SIZE(NSLOTS) \ |
170 | (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion)) |
171 | |
172 | #define TREE_EXP_SIZE(OPS) \ |
173 | (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree)) |
174 | |
175 | /* The Ith entry is the maximum size of an object to be stored in the |
176 | Ith extra order. Adding a new entry to this array is the *only* |
177 | thing you need to do to add a new special allocation size. */ |
178 | |
179 | static const size_t extra_order_size_table[] = { |
180 | /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT. |
181 | There are a lot of structures with these sizes and explicitly |
182 | listing them risks orders being dropped because they changed size. */ |
183 | MAX_ALIGNMENT * 3, |
184 | MAX_ALIGNMENT * 5, |
185 | MAX_ALIGNMENT * 6, |
186 | MAX_ALIGNMENT * 7, |
187 | MAX_ALIGNMENT * 9, |
188 | MAX_ALIGNMENT * 10, |
189 | MAX_ALIGNMENT * 11, |
190 | MAX_ALIGNMENT * 12, |
191 | MAX_ALIGNMENT * 13, |
192 | MAX_ALIGNMENT * 14, |
193 | MAX_ALIGNMENT * 15, |
194 | sizeof (struct tree_decl_non_common), |
195 | sizeof (struct tree_field_decl), |
196 | sizeof (struct tree_parm_decl), |
197 | sizeof (struct tree_var_decl), |
198 | sizeof (struct tree_type_non_common), |
199 | sizeof (struct function), |
200 | sizeof (struct basic_block_def), |
201 | sizeof (struct cgraph_node), |
202 | sizeof (class loop), |
203 | }; |
204 | |
205 | /* The total number of orders. */ |
206 | |
207 | #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS) |
208 | |
209 | /* Compute the smallest nonnegative number which when added to X gives |
210 | a multiple of F. */ |
211 | |
212 | #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f)) |
213 | |
214 | /* Round X to next multiple of the page size */ |
215 | |
216 | #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize) |
217 | |
218 | /* The Ith entry is the number of objects on a page of order I. */ |
219 | |
220 | static unsigned objects_per_page_table[NUM_ORDERS]; |
221 | |
222 | /* The Ith entry is the size of an object on a page of order I. */ |
223 | |
224 | static size_t object_size_table[NUM_ORDERS]; |
225 | |
226 | /* The Ith entry is a pair of numbers (mult, shift) such that |
227 | ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32, |
228 | for all k evenly divisible by OBJECT_SIZE(I). */ |
229 | |
230 | static struct |
231 | { |
232 | size_t mult; |
233 | unsigned int shift; |
234 | } |
235 | inverse_table[NUM_ORDERS]; |
236 | |
237 | /* A page_entry records the status of an allocation page. This |
238 | structure is dynamically sized to fit the bitmap in_use_p. */ |
239 | struct page_entry |
240 | { |
241 | /* The next page-entry with objects of the same size, or NULL if |
242 | this is the last page-entry. */ |
243 | struct page_entry *next; |
244 | |
245 | /* The previous page-entry with objects of the same size, or NULL if |
246 | this is the first page-entry. The PREV pointer exists solely to |
247 | keep the cost of ggc_free manageable. */ |
248 | struct page_entry *prev; |
249 | |
250 | /* The number of bytes allocated. (This will always be a multiple |
251 | of the host system page size.) */ |
252 | size_t bytes; |
253 | |
254 | /* The address at which the memory is allocated. */ |
255 | char *page; |
256 | |
257 | #ifdef USING_MALLOC_PAGE_GROUPS |
258 | /* Back pointer to the page group this page came from. */ |
259 | struct page_group *group; |
260 | #endif |
261 | |
262 | /* This is the index in the by_depth varray where this page table |
263 | can be found. */ |
264 | unsigned long index_by_depth; |
265 | |
266 | /* Context depth of this page. */ |
267 | unsigned short context_depth; |
268 | |
269 | /* The number of free objects remaining on this page. */ |
270 | unsigned short num_free_objects; |
271 | |
272 | /* A likely candidate for the bit position of a free object for the |
273 | next allocation from this page. */ |
274 | unsigned short next_bit_hint; |
275 | |
276 | /* The lg of size of objects allocated from this page. */ |
277 | unsigned char order; |
278 | |
279 | /* Discarded page? */ |
280 | bool discarded; |
281 | |
282 | /* A bit vector indicating whether or not objects are in use. The |
283 | Nth bit is one if the Nth object on this page is allocated. This |
284 | array is dynamically sized. */ |
285 | unsigned long in_use_p[1]; |
286 | }; |
287 | |
288 | #ifdef USING_MALLOC_PAGE_GROUPS |
289 | /* A page_group describes a large allocation from malloc, from which |
290 | we parcel out aligned pages. */ |
291 | struct page_group |
292 | { |
293 | /* A linked list of all extant page groups. */ |
294 | struct page_group *next; |
295 | |
296 | /* The address we received from malloc. */ |
297 | char *allocation; |
298 | |
299 | /* The size of the block. */ |
300 | size_t alloc_size; |
301 | |
302 | /* A bitmask of pages in use. */ |
303 | unsigned int in_use; |
304 | }; |
305 | #endif |
306 | |
307 | #if HOST_BITS_PER_PTR <= 32 |
308 | |
309 | /* On 32-bit hosts, we use a two level page table, as pictured above. */ |
310 | typedef page_entry **page_table[PAGE_L1_SIZE]; |
311 | |
312 | #else |
313 | |
314 | /* On 64-bit hosts, we use the same two level page tables plus a linked |
315 | list that disambiguates the top 32-bits. There will almost always be |
316 | exactly one entry in the list. */ |
317 | typedef struct page_table_chain |
318 | { |
319 | struct page_table_chain *next; |
320 | size_t high_bits; |
321 | page_entry **table[PAGE_L1_SIZE]; |
322 | } *page_table; |
323 | |
324 | #endif |
325 | |
326 | class finalizer |
327 | { |
328 | public: |
329 | finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {} |
330 | |
331 | void *addr () const { return m_addr; } |
332 | |
333 | void call () const { m_function (m_addr); } |
334 | |
335 | private: |
336 | void *m_addr; |
337 | void (*m_function)(void *); |
338 | }; |
339 | |
340 | class vec_finalizer |
341 | { |
342 | public: |
343 | vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) : |
344 | m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {} |
345 | |
346 | void call () const |
347 | { |
348 | for (size_t i = 0; i < m_n_objects; i++) |
349 | m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size))); |
350 | } |
351 | |
352 | void *addr () const { return reinterpret_cast<void *> (m_addr); } |
353 | |
354 | private: |
355 | uintptr_t m_addr; |
356 | void (*m_function)(void *); |
357 | size_t m_object_size; |
358 | size_t m_n_objects; |
359 | }; |
360 | |
361 | #ifdef ENABLE_GC_ALWAYS_COLLECT |
362 | /* List of free objects to be verified as actually free on the |
363 | next collection. */ |
364 | struct free_object |
365 | { |
366 | void *object; |
367 | struct free_object *next; |
368 | }; |
369 | #endif |
370 | |
371 | /* The rest of the global variables. */ |
372 | static struct ggc_globals |
373 | { |
374 | /* The Nth element in this array is a page with objects of size 2^N. |
375 | If there are any pages with free objects, they will be at the |
376 | head of the list. NULL if there are no page-entries for this |
377 | object size. */ |
378 | page_entry *pages[NUM_ORDERS]; |
379 | |
380 | /* The Nth element in this array is the last page with objects of |
381 | size 2^N. NULL if there are no page-entries for this object |
382 | size. */ |
383 | page_entry *page_tails[NUM_ORDERS]; |
384 | |
385 | /* Lookup table for associating allocation pages with object addresses. */ |
386 | page_table lookup; |
387 | |
388 | /* The system's page size. */ |
389 | size_t pagesize; |
390 | size_t lg_pagesize; |
391 | |
392 | /* Bytes currently allocated. */ |
393 | size_t allocated; |
394 | |
395 | /* Bytes currently allocated at the end of the last collection. */ |
396 | size_t allocated_last_gc; |
397 | |
398 | /* Total amount of memory mapped. */ |
399 | size_t bytes_mapped; |
400 | |
401 | /* Bit N set if any allocations have been done at context depth N. */ |
402 | unsigned long context_depth_allocations; |
403 | |
404 | /* Bit N set if any collections have been done at context depth N. */ |
405 | unsigned long context_depth_collections; |
406 | |
407 | /* The current depth in the context stack. */ |
408 | unsigned short context_depth; |
409 | |
410 | /* A file descriptor open to /dev/zero for reading. */ |
411 | #if defined (HAVE_MMAP_DEV_ZERO) |
412 | int dev_zero_fd; |
413 | #endif |
414 | |
415 | /* A cache of free system pages. */ |
416 | page_entry *free_pages; |
417 | |
418 | #ifdef USING_MALLOC_PAGE_GROUPS |
419 | page_group *page_groups; |
420 | #endif |
421 | |
422 | /* The file descriptor for debugging output. */ |
423 | FILE *debug_file; |
424 | |
425 | /* Current number of elements in use in depth below. */ |
426 | unsigned int depth_in_use; |
427 | |
428 | /* Maximum number of elements that can be used before resizing. */ |
429 | unsigned int depth_max; |
430 | |
431 | /* Each element of this array is an index in by_depth where the given |
432 | depth starts. This structure is indexed by the depth we |
433 | are interested in. */ |
434 | unsigned int *depth; |
435 | |
436 | /* Current number of elements in use in by_depth below. */ |
437 | unsigned int by_depth_in_use; |
438 | |
439 | /* Maximum number of elements that can be used before resizing. */ |
440 | unsigned int by_depth_max; |
441 | |
442 | /* Each element of this array is a pointer to a page_entry, all |
443 | page_entries can be found in here by increasing depth. |
444 | index_by_depth in the page_entry is the index into this data |
445 | structure where that page_entry can be found. This is used to |
446 | speed up finding all page_entries at a particular depth. */ |
447 | page_entry **by_depth; |
448 | |
449 | /* Each element is a pointer to the saved in_use_p bits, if any, |
450 | zero otherwise. We allocate them all together, to enable a |
451 | better runtime data access pattern. */ |
452 | unsigned long **save_in_use; |
453 | |
454 | /* Finalizers for single objects. The first index is collection_depth. */ |
455 | vec<vec<finalizer> > finalizers; |
456 | |
457 | /* Finalizers for vectors of objects. */ |
458 | vec<vec<vec_finalizer> > vec_finalizers; |
459 | |
460 | #ifdef ENABLE_GC_ALWAYS_COLLECT |
461 | /* List of free objects to be verified as actually free on the |
462 | next collection. */ |
463 | struct free_object *free_object_list; |
464 | #endif |
465 | |
466 | struct |
467 | { |
468 | /* Total GC-allocated memory. */ |
469 | unsigned long long total_allocated; |
470 | /* Total overhead for GC-allocated memory. */ |
471 | unsigned long long total_overhead; |
472 | |
473 | /* Total allocations and overhead for sizes less than 32, 64 and 128. |
474 | These sizes are interesting because they are typical cache line |
475 | sizes. */ |
476 | |
477 | unsigned long long total_allocated_under32; |
478 | unsigned long long total_overhead_under32; |
479 | |
480 | unsigned long long total_allocated_under64; |
481 | unsigned long long total_overhead_under64; |
482 | |
483 | unsigned long long total_allocated_under128; |
484 | unsigned long long total_overhead_under128; |
485 | |
486 | /* The allocations for each of the allocation orders. */ |
487 | unsigned long long total_allocated_per_order[NUM_ORDERS]; |
488 | |
489 | /* The overhead for each of the allocation orders. */ |
490 | unsigned long long total_overhead_per_order[NUM_ORDERS]; |
491 | } stats; |
492 | } G; |
493 | |
494 | /* True if a gc is currently taking place. */ |
495 | |
496 | static bool in_gc = false; |
497 | |
498 | /* The size in bytes required to maintain a bitmap for the objects |
499 | on a page-entry. */ |
500 | #define BITMAP_SIZE(Num_objects) \ |
501 | (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long)) |
502 | |
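For example, on an LP64 host with 4 KB pages, a page of 8-byte objects holds 512 of them; alloc_page below requests BITMAP_SIZE (512 + 1) so that the one-past-the-end sentinel bit fits too, i.e. CEIL (513, 64) == 9 longs, or 72 bytes of bitmap.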
503 | /* Allocate pages in chunks of this size, to throttle calls to memory |
504 | allocation routines. The first page is used, the rest go onto the |
505 | free list. This cannot be larger than HOST_BITS_PER_INT for the |
506 | in_use bitmask for page_group. Hosts that need a different value |
507 | can override this by defining GGC_QUIRE_SIZE explicitly. */ |
508 | #ifndef GGC_QUIRE_SIZE |
509 | # ifdef USING_MMAP |
510 | # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */ |
511 | # else |
512 | # define GGC_QUIRE_SIZE 16 |
513 | # endif |
514 | #endif |
515 | |
516 | /* Initial guess as to how many page table entries we might need. */ |
517 | #define INITIAL_PTE_COUNT 128 |
518 | |
519 | static page_entry *lookup_page_table_entry (const void *); |
520 | static void set_page_table_entry (void *, page_entry *); |
521 | #ifdef USING_MMAP |
522 | static char *alloc_anon (char *, size_t, bool check); |
523 | #endif |
524 | #ifdef USING_MALLOC_PAGE_GROUPS |
525 | static size_t page_group_index (char *, char *); |
526 | static void set_page_group_in_use (page_group *, char *); |
527 | static void clear_page_group_in_use (page_group *, char *); |
528 | #endif |
529 | static struct page_entry * alloc_page (unsigned); |
530 | static void free_page (struct page_entry *); |
531 | static void clear_marks (void); |
532 | static void sweep_pages (void); |
533 | static void ggc_recalculate_in_use_p (page_entry *); |
534 | static void compute_inverse (unsigned); |
535 | static inline void adjust_depth (void); |
536 | static void move_ptes_to_front (int, int); |
537 | |
538 | void debug_print_page_list (int); |
539 | static void push_depth (unsigned int); |
540 | static void push_by_depth (page_entry *, unsigned long *); |
541 | |
542 | /* Push an entry onto G.depth. */ |
543 | |
544 | inline static void |
545 | push_depth (unsigned int i) |
546 | { |
547 | if (G.depth_in_use >= G.depth_max) |
548 | { |
549 | G.depth_max *= 2; |
550 | G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max); |
551 | } |
552 | G.depth[G.depth_in_use++] = i; |
553 | } |
554 | |
555 | /* Push an entry onto G.by_depth and G.save_in_use. */ |
556 | |
557 | inline static void |
558 | push_by_depth (page_entry *p, unsigned long *s) |
559 | { |
560 | if (G.by_depth_in_use >= G.by_depth_max) |
561 | { |
562 | G.by_depth_max *= 2; |
563 | G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max); |
564 | G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use, |
565 | G.by_depth_max); |
566 | } |
567 | G.by_depth[G.by_depth_in_use] = p; |
568 | G.save_in_use[G.by_depth_in_use++] = s; |
569 | } |
570 | |
571 | #if (GCC_VERSION < 3001) |
572 | #define prefetch(X) ((void) X) |
573 | #else |
574 | #define prefetch(X) __builtin_prefetch (X) |
575 | #endif |
576 | |
577 | #define save_in_use_p_i(__i) \ |
578 | (G.save_in_use[__i]) |
579 | #define save_in_use_p(__p) \ |
580 | (save_in_use_p_i (__p->index_by_depth)) |
581 | |
582 | /* Traverse the page table and find the entry for a page. |
583 | If the object wasn't allocated by the GC, return NULL. */ |
584 | |
585 | static inline page_entry * |
586 | safe_lookup_page_table_entry (const void *p) |
587 | { |
588 | page_entry ***base; |
589 | size_t L1, L2; |
590 | |
591 | #if HOST_BITS_PER_PTR <= 32 |
592 | base = &G.lookup[0]; |
593 | #else |
594 | page_table table = G.lookup; |
595 | uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff; |
596 | while (1) |
597 | { |
598 | if (table == NULL) |
599 | return NULL; |
600 | if (table->high_bits == high_bits) |
601 | break; |
602 | table = table->next; |
603 | } |
604 | base = &table->table[0]; |
605 | #endif |
606 | |
607 | /* Extract the level 1 and 2 indices. */ |
608 | L1 = LOOKUP_L1 (p); |
609 | L2 = LOOKUP_L2 (p); |
610 | if (! base[L1]) |
611 | return NULL; |
612 | |
613 | return base[L1][L2]; |
614 | } |
615 | |
616 | /* Traverse the page table and find the entry for a page. |
617 | Die (probably) if the object wasn't allocated via GC. */ |
618 | |
619 | static inline page_entry * |
620 | lookup_page_table_entry (const void *p) |
621 | { |
622 | page_entry ***base; |
623 | size_t L1, L2; |
624 | |
625 | #if HOST_BITS_PER_PTR <= 32 |
626 | base = &G.lookup[0]; |
627 | #else |
628 | page_table table = G.lookup; |
629 | uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff; |
630 | while (table->high_bits != high_bits) |
631 | table = table->next; |
632 | base = &table->table[0]; |
633 | #endif |
634 | |
635 | /* Extract the level 1 and 2 indices. */ |
636 | L1 = LOOKUP_L1 (p); |
637 | L2 = LOOKUP_L2 (p); |
638 | |
639 | return base[L1][L2]; |
640 | } |
641 | |
642 | /* Set the page table entry for a page. */ |
643 | |
644 | static void |
645 | set_page_table_entry (void *p, page_entry *entry) |
646 | { |
647 | page_entry ***base; |
648 | size_t L1, L2; |
649 | |
650 | #if HOST_BITS_PER_PTR <= 32 |
651 | base = &G.lookup[0]; |
652 | #else |
653 | page_table table; |
654 | uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff; |
655 | for (table = G.lookup; table; table = table->next) |
656 | if (table->high_bits == high_bits) |
657 | goto found; |
658 | |
659 | /* Not found -- allocate a new table. */ |
660 | table = XCNEW (struct page_table_chain); |
661 | table->next = G.lookup; |
662 | table->high_bits = high_bits; |
663 | G.lookup = table; |
664 | found: |
665 | base = &table->table[0]; |
666 | #endif |
667 | |
668 | /* Extract the level 1 and 2 indices. */ |
669 | L1 = LOOKUP_L1 (p); |
670 | L2 = LOOKUP_L2 (p); |
671 | |
672 | if (base[L1] == NULL) |
673 | base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE); |
674 | |
675 | base[L1][L2] = entry; |
676 | } |
677 | |
678 | /* Prints the page-entry for object size ORDER, for debugging. */ |
679 | |
680 | DEBUG_FUNCTION void |
681 | debug_print_page_list (int order) |
682 | { |
683 | page_entry *p; |
684 | printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order], |
685 | (void *) G.page_tails[order]); |
686 | p = G.pages[order]; |
687 | while (p != NULL) |
688 | { |
689 | printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth, |
690 | p->num_free_objects); |
691 | p = p->next; |
692 | } |
693 | printf ("NULL\n"); |
694 | fflush (stdout); |
695 | } |
696 | |
697 | #ifdef USING_MMAP |
698 | /* Allocate SIZE bytes of anonymous memory, preferably near PREF |
699 | (if non-null). The ifdef structure here is intended to cause a |
700 | compile error unless exactly one of the HAVE_* is defined. */ |
701 | |
702 | static inline char * |
703 | alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check) |
704 | { |
705 | #ifdef HAVE_MMAP_ANON |
706 | char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, |
707 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
708 | #endif |
709 | #ifdef HAVE_MMAP_DEV_ZERO |
710 | char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, |
711 | MAP_PRIVATE, G.dev_zero_fd, 0); |
712 | #endif |
713 | |
714 | if (page == (char *) MAP_FAILED) |
715 | { |
716 | if (!check) |
717 | return NULL; |
718 | perror ("virtual memory exhausted"); |
719 | exit (FATAL_EXIT_CODE); |
720 | } |
721 | |
722 | /* Remember that we allocated this memory. */ |
723 | G.bytes_mapped += size; |
724 | |
725 | /* Pretend we don't have access to the allocated pages. We'll enable |
726 | access to smaller pieces of the area in ggc_internal_alloc. Discard the |
727 | handle to avoid handle leak. */ |
728 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size)); |
729 | |
730 | return page; |
731 | } |
732 | #endif |
733 | #ifdef USING_MALLOC_PAGE_GROUPS |
734 | /* Compute the index for this page into the page group. */ |
735 | |
736 | static inline size_t |
737 | page_group_index (char *allocation, char *page) |
738 | { |
739 | return (size_t) (page - allocation) >> G.lg_pagesize; |
740 | } |
741 | |
742 | /* Set and clear the in_use bit for this page in the page group. */ |
743 | |
744 | static inline void |
745 | set_page_group_in_use (page_group *group, char *page) |
746 | { |
747 | group->in_use |= 1 << page_group_index (group->allocation, page); |
748 | } |
749 | |
750 | static inline void |
751 | clear_page_group_in_use (page_group *group, char *page) |
752 | { |
753 | group->in_use &= ~(1 << page_group_index (group->allocation, page)); |
754 | } |
755 | #endif |
756 | |
757 | /* Allocate a new page for allocating objects of size 2^ORDER, |
758 | and return an entry for it. The entry is not added to the |
759 | appropriate page_table list. */ |
760 | |
761 | static inline struct page_entry * |
762 | alloc_page (unsigned order) |
763 | { |
764 | struct page_entry *entry, *p, **pp; |
765 | char *page; |
766 | size_t num_objects; |
767 | size_t bitmap_size; |
768 | size_t page_entry_size; |
769 | size_t entry_size; |
770 | #ifdef USING_MALLOC_PAGE_GROUPS |
771 | page_group *group; |
772 | #endif |
773 | |
774 | num_objects = OBJECTS_PER_PAGE (order); |
775 | bitmap_size = BITMAP_SIZE (num_objects + 1); |
776 | page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size; |
777 | entry_size = num_objects * OBJECT_SIZE (order); |
778 | if (entry_size < G.pagesize) |
779 | entry_size = G.pagesize; |
780 | entry_size = PAGE_ALIGN (entry_size); |
781 | |
782 | entry = NULL; |
783 | page = NULL; |
784 | |
785 | /* Check the list of free pages for one we can use. */ |
786 | for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp) |
787 | if (p->bytes == entry_size) |
788 | break; |
789 | |
790 | if (p != NULL) |
791 | { |
792 | if (p->discarded) |
793 | G.bytes_mapped += p->bytes; |
794 | p->discarded = false; |
795 | |
796 | /* Recycle the allocated memory from this page ... */ |
797 | *pp = p->next; |
798 | page = p->page; |
799 | |
800 | #ifdef USING_MALLOC_PAGE_GROUPS |
801 | group = p->group; |
802 | #endif |
803 | |
804 | /* ... and, if possible, the page entry itself. */ |
805 | if (p->order == order) |
806 | { |
807 | entry = p; |
808 | memset (entry, 0, page_entry_size); |
809 | } |
810 | else |
811 | free (p); |
812 | } |
813 | #ifdef USING_MMAP |
814 | else if (entry_size == G.pagesize) |
815 | { |
816 | /* We want just one page. Allocate a bunch of them and put the |
817 | extras on the freelist. (Can only do this optimization with |
818 | mmap for backing store.) */ |
819 | struct page_entry *e, *f = G.free_pages; |
820 | int i, entries = GGC_QUIRE_SIZE; |
821 | |
822 | page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false); |
823 | if (page == NULL) |
824 | { |
825 | page = alloc_anon (NULL, G.pagesize, true); |
826 | entries = 1; |
827 | } |
828 | |
829 | /* This loop counts down so that the chain will be in ascending |
830 | memory order. */ |
831 | for (i = entries - 1; i >= 1; i--) |
832 | { |
833 | e = XCNEWVAR (struct page_entry, page_entry_size); |
834 | e->order = order; |
835 | e->bytes = G.pagesize; |
836 | e->page = page + (i << G.lg_pagesize); |
837 | e->next = f; |
838 | f = e; |
839 | } |
840 | |
841 | G.free_pages = f; |
842 | } |
843 | else |
844 | page = alloc_anon (NULL, entry_size, true); |
845 | #endif |
846 | #ifdef USING_MALLOC_PAGE_GROUPS |
847 | else |
848 | { |
849 | /* Allocate a large block of memory and serve out the aligned |
850 | pages therein. This results in much less memory wastage |
851 | than the traditional implementation of valloc. */ |
852 | |
853 | char *allocation, *a, *enda; |
854 | size_t alloc_size, head_slop, tail_slop; |
855 | int multiple_pages = (entry_size == G.pagesize); |
856 | |
857 | if (multiple_pages) |
858 | alloc_size = GGC_QUIRE_SIZE * G.pagesize; |
859 | else |
860 | alloc_size = entry_size + G.pagesize - 1; |
861 | allocation = XNEWVEC (char, alloc_size); |
862 | |
863 | page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize); |
864 | head_slop = page - allocation; |
865 | if (multiple_pages) |
866 | tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1); |
867 | else |
868 | tail_slop = alloc_size - entry_size - head_slop; |
869 | enda = allocation + alloc_size - tail_slop; |
870 | |
871 | /* We allocated N pages, which are likely not aligned, leaving |
872 | us with N-1 usable pages. We plan to place the page_group |
873 | structure somewhere in the slop. */ |
874 | if (head_slop >= sizeof (page_group)) |
875 | group = (page_group *)page - 1; |
876 | else |
877 | { |
878 | /* We magically got an aligned allocation. Too bad, we have |
879 | to waste a page anyway. */ |
880 | if (tail_slop == 0) |
881 | { |
882 | enda -= G.pagesize; |
883 | tail_slop += G.pagesize; |
884 | } |
885 | gcc_assert (tail_slop >= sizeof (page_group)); |
886 | group = (page_group *)enda; |
887 | tail_slop -= sizeof (page_group); |
888 | } |
889 | |
890 | /* Remember that we allocated this memory. */ |
891 | group->next = G.page_groups; |
892 | group->allocation = allocation; |
893 | group->alloc_size = alloc_size; |
894 | group->in_use = 0; |
895 | G.page_groups = group; |
896 | G.bytes_mapped += alloc_size; |
897 | |
898 | /* If we allocated multiple pages, put the rest on the free list. */ |
899 | if (multiple_pages) |
900 | { |
901 | struct page_entry *e, *f = G.free_pages; |
902 | for (a = enda - G.pagesize; a != page; a -= G.pagesize) |
903 | { |
904 | e = XCNEWVAR (struct page_entry, page_entry_size); |
905 | e->order = order; |
906 | e->bytes = G.pagesize; |
907 | e->page = a; |
908 | e->group = group; |
909 | e->next = f; |
910 | f = e; |
911 | } |
912 | G.free_pages = f; |
913 | } |
914 | } |
915 | #endif |
916 | |
917 | if (entry == NULL) |
918 | entry = XCNEWVAR (struct page_entry, page_entry_size); |
919 | |
920 | entry->bytes = entry_size; |
921 | entry->page = page; |
922 | entry->context_depth = G.context_depth; |
923 | entry->order = order; |
924 | entry->num_free_objects = num_objects; |
925 | entry->next_bit_hint = 1; |
926 | |
927 | G.context_depth_allocations |= (unsigned long)1 << G.context_depth; |
928 | |
929 | #ifdef USING_MALLOC_PAGE_GROUPS |
930 | entry->group = group; |
931 | set_page_group_in_use (group, page); |
932 | #endif |
933 | |
934 | /* Set the one-past-the-end in-use bit. This acts as a sentry as we |
935 | increment the hint. */ |
936 | entry->in_use_p[num_objects / HOST_BITS_PER_LONG] |
937 | = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG); |
938 | |
939 | set_page_table_entry (page, entry); |
940 | |
941 | if (GGC_DEBUG_LEVEL >= 2) |
942 | fprintf (G.debug_file, |
943 | "Allocating page at %p, object size=%lu, data %p-%p\n", |
944 | (void *) entry, (unsigned long) OBJECT_SIZE (order), |
945 | (void *) page, (void *) (page + entry_size - 1)); |
946 | |
947 | return entry; |
948 | } |
949 | |
950 | /* Adjust the size of G.depth so that no index greater than the one |
951 | used by the top of the G.by_depth is used. */ |
952 | |
953 | static inline void |
954 | adjust_depth (void) |
955 | { |
956 | page_entry *top; |
957 | |
958 | if (G.by_depth_in_use) |
959 | { |
960 | top = G.by_depth[G.by_depth_in_use-1]; |
961 | |
962 | /* Peel back indices in depth that index into by_depth, so that |
963 | as new elements are added to by_depth, we note the indices |
964 | of those elements, if they are for new context depths. */ |
965 | while (G.depth_in_use > (size_t)top->context_depth+1) |
966 | --G.depth_in_use; |
967 | } |
968 | } |
969 | |
970 | /* For a page that is no longer needed, put it on the free page list. */ |
971 | |
972 | static void |
973 | free_page (page_entry *entry) |
974 | { |
975 | if (GGC_DEBUG_LEVEL >= 2) |
976 | fprintf (G.debug_file, |
977 | "Deallocating page at %p, data %p-%p\n", (void *) entry, |
978 | (void *) entry->page, (void *) (entry->page + entry->bytes - 1)); |
979 | |
980 | /* Mark the page as inaccessible. Discard the handle to avoid handle |
981 | leak. */ |
982 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes)); |
983 | |
984 | set_page_table_entry (entry->page, NULL); |
985 | |
986 | #ifdef USING_MALLOC_PAGE_GROUPS |
987 | clear_page_group_in_use (entry->group, entry->page); |
988 | #endif |
989 | |
990 | if (G.by_depth_in_use > 1) |
991 | { |
992 | page_entry *top = G.by_depth[G.by_depth_in_use-1]; |
993 | int i = entry->index_by_depth; |
994 | |
995 | /* We cannot free a page from a context deeper than the current |
996 | one. */ |
997 | gcc_assert (entry->context_depth == top->context_depth); |
998 | |
999 | /* Put top element into freed slot. */ |
1000 | G.by_depth[i] = top; |
1001 | G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1]; |
1002 | top->index_by_depth = i; |
1003 | } |
1004 | --G.by_depth_in_use; |
1005 | |
1006 | adjust_depth (); |
1007 | |
1008 | entry->next = G.free_pages; |
1009 | G.free_pages = entry; |
1010 | } |
1011 | |
1012 | /* Release the free page cache to the system. */ |
1013 | |
1014 | static void |
1015 | release_pages (void) |
1016 | { |
1017 | size_t n1 = 0; |
1018 | size_t n2 = 0; |
1019 | #ifdef USING_MADVISE |
1020 | page_entry *p, *start_p; |
1021 | char *start; |
1022 | size_t len; |
1023 | size_t mapped_len; |
1024 | page_entry *next, *prev, *newprev; |
1025 | size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize; |
1026 | |
1027 | /* First free larger continuous areas to the OS. |
1028 | This allows other allocators to grab these areas if needed. |
1029 | This is only done on larger chunks to avoid fragmentation. |
1030 | This does not always work because the free_pages list is only |
1031 | approximately sorted. */ |
1032 | |
1033 | p = G.free_pages; |
1034 | prev = NULL; |
1035 | while (p) |
1036 | { |
1037 | start = p->page; |
1038 | start_p = p; |
1039 | len = 0; |
1040 | mapped_len = 0; |
1041 | newprev = prev; |
1042 | while (p && p->page == start + len) |
1043 | { |
1044 | len += p->bytes; |
1045 | if (!p->discarded) |
1046 | mapped_len += p->bytes; |
1047 | newprev = p; |
1048 | p = p->next; |
1049 | } |
1050 | if (len >= free_unit) |
1051 | { |
1052 | while (start_p != p) |
1053 | { |
1054 | next = start_p->next; |
1055 | free (start_p); |
1056 | start_p = next; |
1057 | } |
1058 | munmap (start, len); |
1059 | if (prev) |
1060 | prev->next = p; |
1061 | else |
1062 | G.free_pages = p; |
1063 | G.bytes_mapped -= mapped_len; |
1064 | n1 += len; |
1065 | continue; |
1066 | } |
1067 | prev = newprev; |
1068 | } |
1069 | |
1070 | /* Now give back the fragmented pages to the OS, but keep the address |
1071 | space to reuse it next time. */ |
1072 | |
1073 | for (p = G.free_pages; p; ) |
1074 | { |
1075 | if (p->discarded) |
1076 | { |
1077 | p = p->next; |
1078 | continue; |
1079 | } |
1080 | start = p->page; |
1081 | len = p->bytes; |
1082 | start_p = p; |
1083 | p = p->next; |
1084 | while (p && p->page == start + len) |
1085 | { |
1086 | len += p->bytes; |
1087 | p = p->next; |
1088 | } |
1089 | /* Give the page back to the kernel, but don't free the mapping. |
1090 | This avoids fragmentation in the virtual memory map of the |
1091 | process. Next time we can reuse it by just touching it. */ |
1092 | madvise (start, len, MADV_DONTNEED); |
1093 | /* Don't count those pages as mapped to not touch the garbage collector |
1094 | unnecessarily. */ |
1095 | G.bytes_mapped -= len; |
1096 | n2 += len; |
1097 | while (start_p != p) |
1098 | { |
1099 | start_p->discarded = true; |
1100 | start_p = start_p->next; |
1101 | } |
1102 | } |
1103 | #endif |
1104 | #if defined(USING_MMAP) && !defined(USING_MADVISE) |
1105 | page_entry *p, *next; |
1106 | char *start; |
1107 | size_t len; |
1108 | |
1109 | /* Gather up adjacent pages so they are unmapped together. */ |
1110 | p = G.free_pages; |
1111 | |
1112 | while (p) |
1113 | { |
1114 | start = p->page; |
1115 | next = p->next; |
1116 | len = p->bytes; |
1117 | free (p); |
1118 | p = next; |
1119 | |
1120 | while (p && p->page == start + len) |
1121 | { |
1122 | next = p->next; |
1123 | len += p->bytes; |
1124 | free (p); |
1125 | p = next; |
1126 | } |
1127 | |
1128 | munmap (start, len); |
1129 | n1 += len; |
1130 | G.bytes_mapped -= len; |
1131 | } |
1132 | |
1133 | G.free_pages = NULL; |
1134 | #endif |
1135 | #ifdef USING_MALLOC_PAGE_GROUPS |
1136 | page_entry **pp, *p; |
1137 | page_group **gp, *g; |
1138 | |
1139 | /* Remove all pages from free page groups from the list. */ |
1140 | pp = &G.free_pages; |
1141 | while ((p = *pp) != NULL) |
1142 | if (p->group->in_use == 0) |
1143 | { |
1144 | *pp = p->next; |
1145 | free (p); |
1146 | } |
1147 | else |
1148 | pp = &p->next; |
1149 | |
1150 | /* Remove all free page groups, and release the storage. */ |
1151 | gp = &G.page_groups; |
1152 | while ((g = *gp) != NULL) |
1153 | if (g->in_use == 0) |
1154 | { |
1155 | *gp = g->next; |
1156 | G.bytes_mapped -= g->alloc_size; |
1157 | n1 += g->alloc_size; |
1158 | free (g->allocation); |
1159 | } |
1160 | else |
1161 | gp = &g->next; |
1162 | #endif |
1163 | if (!quiet_flag && (n1 || n2)) |
1164 | { |
1165 | fprintf (stderr, " {GC"); |
1166 | if (n1) |
1167 | fprintf (stderr, " released " PRsa (0), SIZE_AMOUNT (n1)); |
1168 | if (n2) |
1169 | fprintf (stderr, " madv_dontneed " PRsa (0), SIZE_AMOUNT (n2)); |
1170 | fprintf (stderr, "}"); |
1171 | } |
1172 | } |
1173 | |
1174 | /* This table provides a fast way to determine ceil(log_2(size)) for |
1175 | allocation requests. The minimum allocation size is eight bytes. */ |
1176 | #define NUM_SIZE_LOOKUP 512 |
1177 | static unsigned char size_lookup[NUM_SIZE_LOOKUP] = |
1178 | { |
1179 | 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, |
1180 | 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
1181 | 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, |
1182 | 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, |
1183 | 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
1184 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
1185 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
1186 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
1187 | 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, |
1188 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, |
1189 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, |
1190 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, |
1191 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, |
1192 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, |
1193 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, |
1194 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, |
1195 | 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1196 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1197 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1198 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1199 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1200 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1201 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1202 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1203 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1204 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1205 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1206 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1207 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1208 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1209 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1210 | 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 |
1211 | }; |
1212 | |
1213 | /* For a given size of memory requested for allocation, return the |
1214 | actual size that is going to be allocated, as well as the size |
1215 | order. */ |
1216 | |
1217 | static void |
1218 | ggc_round_alloc_size_1 (size_t requested_size, |
1219 | size_t *size_order, |
1220 | size_t *alloced_size) |
1221 | { |
1222 | size_t order, object_size; |
1223 | |
1224 | if (requested_size < NUM_SIZE_LOOKUP) |
1225 | { |
1226 | order = size_lookup[requested_size]; |
1227 | object_size = OBJECT_SIZE (order); |
1228 | } |
1229 | else |
1230 | { |
1231 | order = 10; |
1232 | while (requested_size > (object_size = OBJECT_SIZE (order))) |
1233 | order++; |
1234 | } |
1235 | |
1236 | if (size_order) |
1237 | *size_order = order; |
1238 | if (alloced_size) |
1239 | *alloced_size = object_size; |
1240 | } |
1241 | |
1242 | /* For a given size of memory requested for allocation, return the |
1243 | actual size that is going to be allocated. */ |
1244 | |
1245 | size_t |
1246 | ggc_round_alloc_size (size_t requested_size) |
1247 | { |
1248 | size_t size = 0; |
1249 | |
1250 | ggc_round_alloc_size_1 (requested_size, NULL, &size); |
1251 | return size; |
1252 | } |
1253 | |
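For example, a request that falls past the lookup table walks the power-of-two orders upward: ggc_round_alloc_size (3000) starts at order 10 (1024 bytes) and settles on order 12, returning 4096. (Requests below NUM_SIZE_LOOKUP may instead land in one of the extra non-power-of-two orders, depending on how the tables are initialized.)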
1254 | /* Push a finalizer onto the appropriate vec. */ |
1255 | |
1256 | static void |
1257 | add_finalizer (void *result, void (*f)(void *), size_t s, size_t n) |
1258 | { |
1259 | if (f == NULL) |
1260 | /* No finalizer. */; |
1261 | else if (n == 1) |
1262 | { |
1263 | finalizer fin (result, f); |
1264 | G.finalizers[G.context_depth].safe_push (fin); |
1265 | } |
1266 | else |
1267 | { |
1268 | vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n); |
1269 | G.vec_finalizers[G.context_depth].safe_push (fin); |
1270 | } |
1271 | } |
1272 | |
1273 | /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */ |
1274 | |
1275 | void * |
1276 | ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n |
1277 | MEM_STAT_DECL) |
1278 | { |
1279 | size_t order, word, bit, object_offset, object_size; |
1280 | struct page_entry *entry; |
1281 | void *result; |
1282 | |
1283 | ggc_round_alloc_size_1 (size, &order, &object_size); |
1284 | |
1285 | /* If there are non-full pages for this size allocation, they are at |
1286 | the head of the list. */ |
1287 | entry = G.pages[order]; |
1288 | |
1289 | /* If there is no page for this object size, or all pages in this |
1290 | context are full, allocate a new page. */ |
1291 | if (entry == NULL || entry->num_free_objects == 0) |
1292 | { |
1293 | struct page_entry *new_entry; |
1294 | new_entry = alloc_page (order); |
1295 | |
1296 | new_entry->index_by_depth = G.by_depth_in_use; |
1297 | push_by_depth (new_entry, 0); |
1298 | |
1299 | /* We can skip context depths, if we do, make sure we go all the |
1300 | way to the new depth. */ |
1301 | while (new_entry->context_depth >= G.depth_in_use) |
1302 | push_depth (G.by_depth_in_use-1); |
1303 | |
1304 | /* If this is the only entry, it's also the tail. If it is not |
1305 | the only entry, then we must update the PREV pointer of the |
1306 | ENTRY (G.pages[order]) to point to our new page entry. */ |
1307 | if (entry == NULL) |
1308 | G.page_tails[order] = new_entry; |
1309 | else |
1310 | entry->prev = new_entry; |
1311 | |
1312 | /* Put new pages at the head of the page list. By definition the |
1313 | entry at the head of the list always has a NULL pointer. */ |
1314 | new_entry->next = entry; |
1315 | new_entry->prev = NULL; |
1316 | entry = new_entry; |
1317 | G.pages[order] = new_entry; |
1318 | |
1319 | /* For a new page, we know the word and bit positions (in the |
1320 | in_use bitmap) of the first available object -- they're zero. */ |
1321 | new_entry->next_bit_hint = 1; |
1322 | word = 0; |
1323 | bit = 0; |
1324 | object_offset = 0; |
1325 | } |
1326 | else |
1327 | { |
1328 | /* First try to use the hint left from the previous allocation |
1329 | to locate a clear bit in the in-use bitmap. We've made sure |
1330 | that the one-past-the-end bit is always set, so if the hint |
1331 | has run over, this test will fail. */ |
1332 | unsigned hint = entry->next_bit_hint; |
1333 | word = hint / HOST_BITS_PER_LONG; |
1334 | bit = hint % HOST_BITS_PER_LONG; |
1335 | |
1336 | /* If the hint didn't work, scan the bitmap from the beginning. */ |
1337 | if ((entry->in_use_p[word] >> bit) & 1) |
1338 | { |
1339 | word = bit = 0; |
Although the value stored to 'bit' is used in the enclosing expression, the value is never actually read from 'bit' | |
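The store is flagged because, when GCC_VERSION >= 3004, 'bit' is overwritten by the __builtin_ctzl result at line 1344 before it is ever read; the 0 assigned at line 1339 only feeds 'word' through the chained assignment (and the fallback scan in the #else branch, when that is compiled instead).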
1340 | while (~entry->in_use_p[word] == 0) |
1341 | ++word; |
1342 | |
1343 | #if GCC_VERSION >= 3004 |
1344 | bit = __builtin_ctzl (~entry->in_use_p[word]); |
1345 | #else |
1346 | while ((entry->in_use_p[word] >> bit) & 1) |
1347 | ++bit; |
1348 | #endif |
1349 | |
1350 | hint = word * HOST_BITS_PER_LONG + bit; |
1351 | } |
1352 | |
1353 | /* Next time, try the next bit. */ |
1354 | entry->next_bit_hint = hint + 1; |
1355 | |
1356 | object_offset = hint * object_size; |
1357 | } |
1358 | |
1359 | /* Set the in-use bit. */ |
1360 | entry->in_use_p[word] |= ((unsigned long) 1 << bit); |
1361 | |
1362 | /* Keep a running total of the number of free objects. If this page |
1363 | fills up, we may have to move it to the end of the list if the |
1364 | next page isn't full. If the next page is full, all subsequent |
1365 | pages are full, so there's no need to move it. */ |
1366 | if (--entry->num_free_objects == 0 |
1367 | && entry->next != NULL |
1368 | && entry->next->num_free_objects > 0) |
1369 | { |
1370 | /* We have a new head for the list. */ |
1371 | G.pages[order] = entry->next; |
1372 | |
1373 | /* We are moving ENTRY to the end of the page table list. |
1374 | The new page at the head of the list will have NULL in |
1375 | its PREV field and ENTRY will have NULL in its NEXT field. */ |
1376 | entry->next->prev = NULL; |
1377 | entry->next = NULL; |
1378 | |
1379 | /* Append ENTRY to the tail of the list. */ |
1380 | entry->prev = G.page_tails[order]; |
1381 | G.page_tails[order]->next = entry; |
1382 | G.page_tails[order] = entry; |
1383 | } |
1384 | |
1385 | /* Calculate the object's address. */ |
1386 | result = entry->page + object_offset; |
1387 | if (GATHER_STATISTICS) |
1388 | ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size, |
1389 | result FINAL_PASS_MEM_STAT); |
1390 | |
1391 | #ifdef ENABLE_GC_CHECKING |
1392 | /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the |
1393 | exact same semantics in presence of memory bugs, regardless of |
1394 | ENABLE_VALGRIND_CHECKING. We override this request below. Drop the |
1395 | handle to avoid handle leak. */ |
1396 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size)); |
1397 | |
1398 | /* `Poison' the entire allocated object, including any padding at |
1399 | the end. */ |
1400 | memset (result, 0xaf, object_size); |
1401 | |
1402 | /* Make the bytes after the end of the object inaccessible. Discard the |
1403 | handle to avoid handle leak. */ |
1404 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size, |
1405 | object_size - size)); |
1406 | #endif |
1407 | |
1408 | /* Tell Valgrind that the memory is there, but its content isn't |
1409 | defined. The bytes at the end of the object are still marked |
1410 | inaccessible. */ |
1411 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size)); |
1412 | |
1413 | /* Keep track of how many bytes are being allocated. This |
1414 | information is used in deciding when to collect. */ |
1415 | G.allocated += object_size; |
1416 | |
1417 | /* For timevar statistics. */ |
1418 | timevar_ggc_mem_total += object_size; |
1419 | |
1420 | if (f) |
1421 | add_finalizer (result, f, s, n); |
1422 | |
1423 | if (GATHER_STATISTICS) |
1424 | { |
1425 | size_t overhead = object_size - size; |
1426 | |
1427 | G.stats.total_overhead += overhead; |
1428 | G.stats.total_allocated += object_size; |
1429 | G.stats.total_overhead_per_order[order] += overhead; |
1430 | G.stats.total_allocated_per_order[order] += object_size; |
1431 | |
1432 | if (size <= 32) |
1433 | { |
1434 | G.stats.total_overhead_under32 += overhead; |
1435 | G.stats.total_allocated_under32 += object_size; |
1436 | } |
1437 | if (size <= 64) |
1438 | { |
1439 | G.stats.total_overhead_under64 += overhead; |
1440 | G.stats.total_allocated_under64 += object_size; |
1441 | } |
1442 | if (size <= 128) |
1443 | { |
1444 | G.stats.total_overhead_under128 += overhead; |
1445 | G.stats.total_allocated_under128 += object_size; |
1446 | } |
1447 | } |
1448 | |
1449 | if (GGC_DEBUG_LEVEL >= 3) |
1450 | fprintf (G.debug_file, |
1451 | "Allocating object, requested size=%lu, actual=%lu at %p on %p\n", |
1452 | (unsigned long) size, (unsigned long) object_size, result, |
1453 | (void *) entry); |
1454 | |
1455 | return result; |
1456 | } |
1457 | |
1458 | /* Mark function for strings. */ |
1459 | |
1460 | void |
1461 | gt_ggc_m_S (const void *p) |
1462 | { |
1463 | page_entry *entry; |
1464 | unsigned bit, word; |
1465 | unsigned long mask; |
1466 | unsigned long offset; |
1467 | |
1468 | if (!p) |
1469 | return; |
1470 | |
1471 | /* Look up the page on which the object is alloced. If it was not |
1472 | GC allocated, gracefully bail out. */ |
1473 | entry = safe_lookup_page_table_entry (p); |
1474 | if (!entry) |
1475 | return; |
1476 | |
1477 | /* Calculate the index of the object on the page; this is its bit |
1478 | position in the in_use_p bitmap. Note that because a char* might |
1479 | point to the middle of an object, we need special code here to |
1480 | make sure P points to the start of an object. */ |
1481 | offset = ((const char *) p - entry->page) % object_size_table[entry->order]; |
1482 | if (offset) |
1483 | { |
1484 | /* Here we've seen a char* which does not point to the beginning |
1485 | of an allocated object. We assume it points to the middle of |
1486 | a STRING_CST. */ |
1487 | gcc_assert (offset == offsetof (struct tree_string, str)); |
1488 | p = ((const char *) p) - offset; |
1489 | gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p)); |
1490 | return; |
1491 | } |
1492 | |
1493 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); |
1494 | word = bit / HOST_BITS_PER_LONG; |
1495 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); |
1496 | |
1497 | /* If the bit was previously set, skip it. */ |
1498 | if (entry->in_use_p[word] & mask) |
1499 | return; |
1500 | |
1501 | /* Otherwise set it, and decrement the free object count. */ |
1502 | entry->in_use_p[word] |= mask; |
1503 | entry->num_free_objects -= 1; |
1504 | |
1505 | if (GGC_DEBUG_LEVEL >= 4) |
1506 | fprintf (G.debug_file, "Marking %p\n", p); |
1507 | |
1508 | return; |
1509 | } |
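
/* A minimal standalone sketch (not part of this file) of the word/mask
   arithmetic used above: bit I of a page's in-use bitmap lives in word
   I / HOST_BITS_PER_LONG, at position I % HOST_BITS_PER_LONG.  Here
   HOST_BITS_PER_LONG is assumed to be 64 for illustration.  */
#if 0
#include <cassert>

int main ()
{
  const unsigned hbpl = 64;                  /* stand-in for HOST_BITS_PER_LONG */
  unsigned long in_use[2] = { 0, 0 };        /* bitmap covering 128 objects */

  unsigned bit = 70;                         /* mark object number 70 */
  unsigned word = bit / hbpl;                /* word 1 of the bitmap */
  unsigned long mask = 1UL << (bit % hbpl);  /* bit 6 within that word */

  assert (!(in_use[word] & mask));           /* object starts unmarked */
  in_use[word] |= mask;                      /* set the mark */
  assert (in_use[word] & mask);              /* a second mark would be skipped */
  return 0;
}
#endif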
1510 | |
1511 | |
1512 | /* User-callable entry points for marking string X. */ |
1513 | |
1514 | void |
1515 | gt_ggc_mx (const char *& x) |
1516 | { |
1517 | gt_ggc_m_S (x); |
1518 | } |
1519 | |
1520 | void |
1521 | gt_ggc_mx (char *& x) |
1522 | { |
1523 | gt_ggc_m_S (x); |
1524 | } |
1525 | |
1526 | void |
1527 | gt_ggc_mx (unsigned char *& x) |
1528 | { |
1529 | gt_ggc_m_S (x); |
1530 | } |
1531 | |
1532 | void |
1533 | gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED) |
1534 | { |
1535 | } |
1536 | |
1537 | /* If P is not marked, mark it and return false. Otherwise return true. |
1538 | P must have been allocated by the GC allocator; it mustn't point to |
1539 | static objects, stack variables, or memory allocated with malloc. */ |
1540 | |
1541 | int |
1542 | ggc_set_mark (const void *p) |
1543 | { |
1544 | page_entry *entry; |
1545 | unsigned bit, word; |
1546 | unsigned long mask; |
1547 | |
1548 | /* Look up the page on which the object is alloced. If the object |
1549 | wasn't allocated by the collector, we'll probably die. */ |
1550 | entry = lookup_page_table_entry (p); |
1551 | gcc_assert (entry); |
1552 | |
1553 | /* Calculate the index of the object on the page; this is its bit |
1554 | position in the in_use_p bitmap. */ |
1555 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); |
1556 | word = bit / HOST_BITS_PER_LONG; |
1557 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); |
1558 | |
1559 | /* If the bit was previously set, skip it. */ |
1560 | if (entry->in_use_p[word] & mask) |
1561 | return 1; |
1562 | |
1563 | /* Otherwise set it, and decrement the free object count. */ |
1564 | entry->in_use_p[word] |= mask; |
1565 | entry->num_free_objects -= 1; |
1566 | |
1567 | if (GGC_DEBUG_LEVEL >= 4) |
1568 | fprintf (G.debug_file, "Marking %p\n", p); |
1569 | |
1570 | return 0; |
1571 | } |
1572 | |
1573 | /* Return 1 if P has been marked, zero otherwise. |
1574 | P must have been allocated by the GC allocator; it mustn't point to |
1575 | static objects, stack variables, or memory allocated with malloc. */ |
1576 | |
1577 | int |
1578 | ggc_marked_p (const void *p) |
1579 | { |
1580 | page_entry *entry; |
1581 | unsigned bit, word; |
1582 | unsigned long mask; |
1583 | |
1584 | /* Look up the page on which the object is alloced. If the object |
1585 | wasn't allocated by the collector, we'll probably die. */ |
1586 | entry = lookup_page_table_entry (p); |
1587 | gcc_assert (entry); |
1588 | |
1589 | /* Calculate the index of the object on the page; this is its bit |
1590 | position in the in_use_p bitmap. */ |
1591 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); |
1592 | word = bit / HOST_BITS_PER_LONG; |
1593 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); |
1594 | |
1595 | return (entry->in_use_p[word] & mask) != 0; |
1596 | } |
1597 | |
1598 | /* Return the size of the gc-able object P. */ |
1599 | |
1600 | size_t |
1601 | ggc_get_size (const void *p) |
1602 | { |
1603 | page_entry *pe = lookup_page_table_entry (p); |
1604 | return OBJECT_SIZE (pe->order); |
1605 | } |
1606 | |
1607 | /* Release the memory for object P. */ |
1608 | |
1609 | void |
1610 | ggc_free (void *p) |
1611 | { |
1612 | if (in_gc) |
1613 | return; |
1614 | |
1615 | page_entry *pe = lookup_page_table_entry (p); |
1616 | size_t order = pe->order; |
1617 | size_t size = OBJECT_SIZE (order); |
1618 | |
1619 | if (GATHER_STATISTICS) |
1620 | ggc_free_overhead (p); |
1621 | |
1622 | if (GGC_DEBUG_LEVEL >= 3) |
1623 | fprintf (G.debug_file, |
1624 | "Freeing object, actual size=%lu, at %p on %p\n", |
1625 | (unsigned long) size, p, (void *) pe); |
1626 | |
1627 | #ifdef ENABLE_GC_CHECKING |
1628 | /* Poison the data, to indicate the data is garbage. */ |
1629 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size)); |
1630 | memset (p, 0xa5, size); |
1631 | #endif |
1632 | /* Let valgrind know the object is free. */ |
1633 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size)); |
1634 | |
1635 | #ifdef ENABLE_GC_ALWAYS_COLLECT |
1636 | /* In the completely-anal-checking mode, we do *not* immediately free |
1637 | the data, but instead verify that the data is *actually* not |
1638 | reachable the next time we collect. */ |
1639 | { |
1640 | struct free_object *fo = XNEW (struct free_object); |
1641 | fo->object = p; |
1642 | fo->next = G.free_object_list; |
1643 | G.free_object_list = fo; |
1644 | } |
1645 | #else |
1646 | { |
1647 | unsigned int bit_offset, word, bit; |
1648 | |
1649 | G.allocated -= size; |
1650 | |
1651 | /* Mark the object not-in-use. */ |
1652 | bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order); |
1653 | word = bit_offset / HOST_BITS_PER_LONG; |
1654 | bit = bit_offset % HOST_BITS_PER_LONG; |
1655 | pe->in_use_p[word] &= ~(1UL << bit); |
1656 | |
1657 | if (pe->num_free_objects++ == 0) |
1658 | { |
1659 | page_entry *p, *q; |
1660 | |
1661 | /* If the page is completely full, then it's supposed to |
1662 | be after all pages that aren't. Since we've freed one |
1663 | object from a page that was full, we need to move the |
1664 | page to the head of the list. |
1665 | |
1666 | PE is the node we want to move. Q is the previous node |
1667 | and P is the next node in the list. */ |
1668 | q = pe->prev; |
1669 | if (q && q->num_free_objects == 0) |
1670 | { |
1671 | p = pe->next; |
1672 | |
1673 | q->next = p; |
1674 | |
1675 | /* If PE was at the end of the list, then Q becomes the |
1676 | new end of the list. If PE was not the end of the |
1677 | list, then we need to update the PREV field for P. */ |
1678 | if (!p) |
1679 | G.page_tails[order] = q; |
1680 | else |
1681 | p->prev = q; |
1682 | |
1683 | /* Move PE to the head of the list. */ |
1684 | pe->next = G.pages[order]; |
1685 | pe->prev = NULL; |
1686 | G.pages[order]->prev = pe; |
1687 | G.pages[order] = pe; |
1688 | } |
1689 | |
1690 | /* Reset the hint bit to point to the only free object. */ |
1691 | pe->next_bit_hint = bit_offset; |
1692 | } |
1693 | } |
1694 | #endif |
1695 | } |
1696 | |
1697 | /* Subroutine of init_ggc which computes the pair of numbers used to |
1698 | perform division by OBJECT_SIZE (order) and fills in inverse_table[]. |
1699 | |
1700 | This algorithm is taken from Granlund and Montgomery's paper |
1701 | "Division by Invariant Integers using Multiplication" |
1702 | (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by |
1703 | constants). */ |
1704 | |
1705 | static void |
1706 | compute_inverse (unsigned order) |
1707 | { |
1708 | size_t size, inv; |
1709 | unsigned int e; |
1710 | |
1711 | size = OBJECT_SIZE (order); |
1712 | e = 0; |
1713 | while (size % 2 == 0) |
1714 | { |
1715 | e++; |
1716 | size >>= 1; |
1717 | } |
1718 | |
1719 | inv = size; |
1720 | while (inv * size != 1) |
1721 | inv = inv * (2 - inv*size); |
1722 | |
1723 | DIV_MULT (order) = inv; |
1724 | DIV_SHIFT (order) = e; |
1725 | } |
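
/* A worked example of the exact-division trick that compute_inverse
   implements (standalone sketch, not part of this file; 24 is a
   hypothetical extra-order object size).  Each Newton step doubles the
   number of correct low-order bits of the inverse, so the loop above
   converges in a handful of iterations.  */
#if 0
#include <cassert>
#include <cstddef>

int main ()
{
  size_t size = 24, e = 0;            /* 24 = 2^3 * 3 */
  while (size % 2 == 0) { e++; size >>= 1; }   /* e = 3, odd part size = 3 */

  size_t inv = size;                  /* Newton iteration; arithmetic wraps */
  while (inv * size != 1)             /* mod 2^N via size_t overflow */
    inv = inv * (2 - inv * size);

  /* Exact division of a multiple of 24: byte offset 72 is object 3.  */
  size_t offset = 72;
  assert (((offset * inv) >> e) == offset / 24);
  return 0;
}
#endif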
1726 | |
1727 | /* Initialize the ggc-mmap allocator. */ |
1728 | void |
1729 | init_ggc (void) |
1730 | { |
1731 | static bool init_p = false; |
1732 | unsigned order; |
1733 | |
1734 | if (init_p) |
1735 | return; |
1736 | init_p = true; |
1737 | |
1738 | G.pagesize = getpagesize (); |
1739 | G.lg_pagesize = exact_log2 (G.pagesize); |
1740 | |
1741 | #ifdef HAVE_MMAP_DEV_ZERO |
1742 | G.dev_zero_fd = open ("/dev/zero", O_RDONLY); |
1743 | if (G.dev_zero_fd == -1) |
1744 | internal_error ("open /dev/zero: %m"); |
1745 | #endif |
1746 | |
1747 | #if 0 |
1748 | G.debug_file = fopen ("ggc-mmap.debug", "w"); |
1749 | #else |
1750 | G.debug_file = stdout; |
1751 | #endif |
1752 | |
1753 | #ifdef USING_MMAP |
1754 | /* StunOS has an amazing off-by-one error for the first mmap allocation |
1755 | after fiddling with RLIMIT_STACK. The result, as hard as it is to |
1756 | believe, is an unaligned page allocation, which would cause us to |
1757 | hork badly if we tried to use it. */ |
1758 | { |
1759 | char *p = alloc_anon (NULL, G.pagesize, true); |
1760 | struct page_entry *e; |
1761 | if ((uintptr_t)p & (G.pagesize - 1)) |
1762 | { |
1763 | /* How losing. Discard this one and try another. If we still |
1764 | can't get something useful, give up. */ |
1765 | |
1766 | p = alloc_anon (NULL, G.pagesize, true); |
1767 | gcc_assert (!((uintptr_t)p & (G.pagesize - 1))); |
1768 | } |
1769 | |
1770 | /* We have a good page, might as well hold onto it... */ |
1771 | e = XCNEW (struct page_entry); |
1772 | e->bytes = G.pagesize; |
1773 | e->page = p; |
1774 | e->next = G.free_pages; |
1775 | G.free_pages = e; |
1776 | } |
1777 | #endif |
1778 | |
1779 | /* Initialize the object size table. */ |
1780 | for (order = 0; order < HOST_BITS_PER_PTR; ++order) |
1781 | object_size_table[order] = (size_t) 1 << order; |
1782 | for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order) |
1783 | { |
1784 | size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR]; |
1785 | |
1786 | /* If S is not a multiple of the MAX_ALIGNMENT, then round it up |
1787 | so that we're sure of getting aligned memory. */ |
1788 | s = ROUND_UP (s, MAX_ALIGNMENT); |
1789 | object_size_table[order] = s; |
1790 | } |
1791 | |
1792 | /* Initialize the objects-per-page and inverse tables. */ |
1793 | for (order = 0; order < NUM_ORDERS; ++order) |
1794 | { |
1795 | objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order); |
1796 | if (objects_per_page_table[order] == 0) |
1797 | objects_per_page_table[order] = 1; |
1798 | compute_inverse (order); |
1799 | } |
1800 | |
1801 | /* Reset the size_lookup array to put appropriately sized objects in |
1802 | the special orders. All objects bigger than the previous power |
1803 | of two, but no greater than the special size, should go in the |
1804 | new order. */ |
1805 | for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order) |
1806 | { |
1807 | int o; |
1808 | int i; |
1809 | |
1810 | i = OBJECT_SIZE (order); |
1811 | if (i >= NUM_SIZE_LOOKUP) |
1812 | continue; |
1813 | |
1814 | for (o = size_lookup[i]; o == size_lookup[i]; --i) |
1815 | size_lookup[i] = order; |
1816 | } |
1817 | |
1818 | G.depth_in_use = 0; |
1819 | G.depth_max = 10; |
1820 | G.depth = XNEWVEC (unsigned int, G.depth_max); |
1821 | |
1822 | G.by_depth_in_use = 0; |
1823 | G.by_depth_max = INITIAL_PTE_COUNT; |
1824 | G.by_depth = XNEWVEC (page_entry *, G.by_depth_max); |
1825 | G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); |
1826 | |
1827 | /* Allocate space for the depth 0 finalizers. */ |
1828 | G.finalizers.safe_push (vNULL); |
1829 | G.vec_finalizers.safe_push (vNULL); |
1830 | gcc_assert (G.finalizers.length() == 1); |
1831 | } |
1832 | |
1833 | /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P |
1834 | reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ |
1835 | |
1836 | static void |
1837 | ggc_recalculate_in_use_p (page_entry *p) |
1838 | { |
1839 | unsigned int i; |
1840 | size_t num_objects; |
1841 | |
1842 | /* Because the past-the-end bit in in_use_p is always set, we |
1843 | pretend there is one additional object. */ |
1844 | num_objects = OBJECTS_IN_PAGE (p) + 1; |
1845 | |
1846 | /* Reset the free object count. */ |
1847 | p->num_free_objects = num_objects; |
1848 | |
1849 | /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */ |
1850 | for (i = 0; |
1851 | i < CEIL (BITMAP_SIZE (num_objects), |
1852 | sizeof (*p->in_use_p)); |
1853 | ++i) |
1854 | { |
1855 | unsigned long j; |
1856 | |
1857 | /* Something is in use if it is marked, or if it was in use in a |
1858 | context further down the context stack. */ |
1859 | p->in_use_p[i] |= save_in_use_p (p)[i]; |
1860 | |
1861 | /* Decrement the free object count for every object allocated. */ |
1862 | for (j = p->in_use_p[i]; j; j >>= 1) |
1863 | p->num_free_objects -= (j & 1); |
1864 | } |
1865 | |
1866 | gcc_assert (p->num_free_objects < num_objects); |
1867 | } |
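
/* The inner loop above is a plain population count: one decrement per
   set bit in the combined in-use word.  A standalone check of the idiom
   (sketch only, not part of this file):  */
#if 0
#include <cassert>

int main ()
{
  unsigned long word = 0xF0F0UL;      /* 8 objects allocated in this word */
  unsigned count = 0;
  for (unsigned long j = word; j; j >>= 1)
    count += (j & 1);                 /* same shift-and-test as above */
  assert (count == (unsigned) __builtin_popcountl (word));
  return 0;
}
#endif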
1868 | |
1869 | /* Unmark all objects. */ |
1870 | |
1871 | static void |
1872 | clear_marks (void) |
1873 | { |
1874 | unsigned order; |
1875 | |
1876 | for (order = 2; order < NUM_ORDERS; order++) |
1877 | { |
1878 | page_entry *p; |
1879 | |
1880 | for (p = G.pages[order]; p != NULL; p = p->next) |
1881 | { |
1882 | size_t num_objects = OBJECTS_IN_PAGE (p); |
1883 | size_t bitmap_size = BITMAP_SIZE (num_objects + 1); |
1884 | |
1885 | /* The data should be page-aligned. */ |
1886 | gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1))); |
1887 | |
1888 | /* Pages that aren't in the topmost context are not collected; |
1889 | nevertheless, we need their in-use bit vectors to store GC |
1890 | marks. So, back them up first. */ |
1891 | if (p->context_depth < G.context_depth) |
1892 | { |
1893 | if (! save_in_use_p (p)) |
1894 | save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size); |
1895 | memcpy (save_in_use_p (p), p->in_use_p, bitmap_size); |
1896 | } |
1897 | |
1898 | /* Reset the number of free objects and clear the |
1899 | in-use bits. These will be adjusted by mark_obj. */ |
1900 | p->num_free_objects = num_objects; |
1901 | memset (p->in_use_p, 0, bitmap_size); |
1902 | |
1903 | /* Make sure the one-past-the-end bit is always set. */ |
1904 | p->in_use_p[num_objects / HOST_BITS_PER_LONG] |
1905 | = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG)); |
1906 | } |
1907 | } |
1908 | } |
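
/* The bit set just past the last object is the sentinel that lets
   ggc_recalculate_in_use_p pretend each page holds one extra,
   permanently live object.  A standalone sketch of the layout for a
   page of five objects, with HOST_BITS_PER_LONG assumed to be 64 (not
   part of this file):  */
#if 0
#include <cassert>

int main ()
{
  const unsigned hbpl = 64;           /* stand-in for HOST_BITS_PER_LONG */
  unsigned num_objects = 5;
  unsigned long in_use[1] = { 0 };

  /* Clear all marks, then set the one-past-the-end sentinel.  */
  in_use[num_objects / hbpl] = 1UL << (num_objects % hbpl);

  assert (in_use[0] == 0x20);         /* only bit 5, the sentinel, is set */
  for (unsigned i = 0; i < num_objects; i++)
    assert (!((in_use[i / hbpl] >> (i % hbpl)) & 1));  /* all objects free */
  return 0;
}
#endif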
1909 | |
1910 | /* Check if any blocks with a registered finalizer have become unmarked. If so |
1911 | run the finalizer and unregister it because the block is about to be freed. |
1912 | Note that no guarantee is made about the order in which finalizers |
1913 | run, so touching other objects in GC memory is extremely unwise. */ |
1914 | |
1915 | static void |
1916 | ggc_handle_finalizers () |
1917 | { |
1918 | unsigned dlen = G.finalizers.length(); |
1919 | for (unsigned d = G.context_depth; d < dlen; ++d) |
1920 | { |
1921 | vec<finalizer> &v = G.finalizers[d]; |
1922 | unsigned length = v.length (); |
1923 | for (unsigned int i = 0; i < length;) |
1924 | { |
1925 | finalizer &f = v[i]; |
1926 | if (!ggc_marked_p (f.addr ())) |
1927 | { |
1928 | f.call (); |
1929 | v.unordered_remove (i); |
1930 | length--; |
1931 | } |
1932 | else |
1933 | i++; |
1934 | } |
1935 | } |
1936 | |
1937 | gcc_assert (dlen == G.vec_finalizers.length()); |
1938 | for (unsigned d = G.context_depth; d < dlen; ++d) |
1939 | { |
1940 | vec<vec_finalizer> &vv = G.vec_finalizers[d]; |
1941 | unsigned length = vv.length (); |
1942 | for (unsigned int i = 0; i < length;) |
1943 | { |
1944 | vec_finalizer &f = vv[i]; |
1945 | if (!ggc_marked_p (f.addr ())) |
1946 | { |
1947 | f.call (); |
1948 | vv.unordered_remove (i); |
1949 | length--; |
1950 | } |
1951 | else |
1952 | i++; |
1953 | } |
1954 | } |
1955 | } |
1956 | |
1957 | /* Free all empty pages. Partially empty pages need no attention |
1958 | because the `mark' bit doubles as an `unused' bit. */ |
1959 | |
1960 | static void |
1961 | sweep_pages (void) |
1962 | { |
1963 | unsigned order; |
1964 | |
1965 | for (order = 2; order < NUM_ORDERS; order++) |
1966 | { |
1967 | /* The last page-entry to consider, regardless of entries |
1968 | placed at the end of the list. */ |
1969 | page_entry * const last = G.page_tails[order]; |
1970 | |
1971 | size_t num_objects; |
1972 | size_t live_objects; |
1973 | page_entry *p, *previous; |
1974 | int done; |
1975 | |
1976 | p = G.pages[order]; |
1977 | if (p == NULL) |
1978 | continue; |
1979 | |
1980 | previous = NULL; |
1981 | do |
1982 | { |
1983 | page_entry *next = p->next; |
1984 | |
1985 | /* Loop until all entries have been examined. */ |
1986 | done = (p == last); |
1987 | |
1988 | num_objects = OBJECTS_IN_PAGE (p); |
1989 | |
1990 | /* Add all live objects on this page to the count of |
1991 | allocated memory. */ |
1992 | live_objects = num_objects - p->num_free_objects; |
1993 | |
1994 | G.allocated += OBJECT_SIZE (order) * live_objects; |
1995 | |
1996 | /* Only objects on pages in the topmost context should get |
1997 | collected. */ |
1998 | if (p->context_depth < G.context_depth) |
1999 | ; |
2000 | |
2001 | /* Remove the page if it's empty. */ |
2002 | else if (live_objects == 0) |
2003 | { |
2004 | /* If P was the first page in the list, then NEXT |
2005 | becomes the new first page in the list, otherwise |
2006 | splice P out of the forward pointers. */ |
2007 | if (! previous) |
2008 | G.pages[order] = next; |
2009 | else |
2010 | previous->next = next; |
2011 | |
2012 | /* Splice P out of the back pointers too. */ |
2013 | if (next) |
2014 | next->prev = previous; |
2015 | |
2016 | /* Are we removing the last element? */ |
2017 | if (p == G.page_tails[order]) |
2018 | G.page_tails[order] = previous; |
2019 | free_page (p); |
2020 | p = previous; |
2021 | } |
2022 | |
2023 | /* If the page is full, move it to the end. */ |
2024 | else if (p->num_free_objects == 0) |
2025 | { |
2026 | /* Don't move it if it's already at the end. */ |
2027 | if (p != G.page_tails[order]) |
2028 | { |
2029 | /* Move p to the end of the list. */ |
2030 | p->next = NULL; |
2031 | p->prev = G.page_tails[order]; |
2032 | G.page_tails[order]->next = p; |
2033 | |
2034 | /* Update the tail pointer... */ |
2035 | G.page_tails[order] = p; |
2036 | |
2037 | /* ... and the head pointer, if necessary. */ |
2038 | if (! previous) |
2039 | G.pages[order] = next; |
2040 | else |
2041 | previous->next = next; |
2042 | |
2043 | /* And update the backpointer in NEXT if necessary. */ |
2044 | if (next) |
2045 | next->prev = previous; |
2046 | |
2047 | p = previous; |
2048 | } |
2049 | } |
2050 | |
2051 | /* If we've fallen through to here, it's a page in the |
2052 | topmost context that is neither full nor empty. Such a |
2053 | page must precede pages at lesser context depth in the |
2054 | list, so move it to the head. */ |
2055 | else if (p != G.pages[order]) |
2056 | { |
2057 | previous->next = p->next; |
2058 | |
2059 | /* Update the backchain in the next node if it exists. */ |
2060 | if (p->next) |
2061 | p->next->prev = previous; |
2062 | |
2063 | /* Move P to the head of the list. */ |
2064 | p->next = G.pages[order]; |
2065 | p->prev = NULL; |
2066 | G.pages[order]->prev = p; |
2067 | |
2068 | /* Update the head pointer. */ |
2069 | G.pages[order] = p; |
2070 | |
2071 | /* Are we moving the last element? */ |
2072 | if (G.page_tails[order] == p) |
2073 | G.page_tails[order] = previous; |
2074 | p = previous; |
2075 | } |
2076 | |
2077 | previous = p; |
2078 | p = next; |
2079 | } |
2080 | while (! done); |
2081 | |
2082 | /* Now, restore the in_use_p vectors for any pages from contexts |
2083 | other than the current one. */ |
2084 | for (p = G.pages[order]; p; p = p->next) |
2085 | if (p->context_depth != G.context_depth) |
2086 | ggc_recalculate_in_use_p (p); |
2087 | } |
2088 | } |
2089 | |
2090 | #ifdef ENABLE_GC_CHECKING |
2091 | /* Clobber all free objects. */ |
2092 | |
2093 | static void |
2094 | poison_pages (void) |
2095 | { |
2096 | unsigned order; |
2097 | |
2098 | for (order = 2; order < NUM_ORDERS; order++) |
2099 | { |
2100 | size_t size = OBJECT_SIZE (order); |
2101 | page_entry *p; |
2102 | |
2103 | for (p = G.pages[order]; p != NULL; p = p->next) |
2104 | { |
2105 | size_t num_objects; |
2106 | size_t i; |
2107 | |
2108 | if (p->context_depth != G.context_depth) |
2109 | /* Since we don't do any collection for pages in pushed |
2110 | contexts, there's no need to do any poisoning. And |
2111 | besides, the IN_USE_P array isn't valid until we pop |
2112 | contexts. */ |
2113 | continue; |
2114 | |
2115 | num_objects = OBJECTS_IN_PAGE (p); |
2116 | for (i = 0; i < num_objects; i++) |
2117 | { |
2118 | size_t word, bit; |
2119 | word = i / HOST_BITS_PER_LONG; |
2120 | bit = i % HOST_BITS_PER_LONG; |
2121 | if (((p->in_use_p[word] >> bit) & 1) == 0) |
2122 | { |
2123 | char *object = p->page + i * size; |
2124 | |
2125 | /* Keep poison-by-write when we expect to use Valgrind, |
2126 | so exactly the same memory semantics are kept, in case |
2127 | there are memory errors. We override this request |
2128 | below. */ |
2129 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object, |
2130 | size)); |
2131 | memset (object, 0xa5, size); |
2132 | |
2133 | /* Drop the handle to avoid handle leak. */ |
2134 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size)); |
2135 | } |
2136 | } |
2137 | } |
2138 | } |
2139 | } |
2140 | #else |
2141 | #define poison_pages() |
2142 | #endif |
2143 | |
2144 | #ifdef ENABLE_GC_ALWAYS_COLLECT |
2145 | /* Validate that the reportedly free objects actually are. */ |
2146 | |
2147 | static void |
2148 | validate_free_objects (void) |
2149 | { |
2150 | struct free_object *f, *next, *still_free = NULL; |
2151 | |
2152 | for (f = G.free_object_list; f ; f = next) |
2153 | { |
2154 | page_entry *pe = lookup_page_table_entry (f->object); |
2155 | size_t bit, word; |
2156 | |
2157 | bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order); |
2158 | word = bit / HOST_BITS_PER_LONG; |
2159 | bit = bit % HOST_BITS_PER_LONG; |
2160 | next = f->next; |
2161 | |
2162 | /* Make certain it isn't visible from any root. Notice that we |
2163 | do this check before sweep_pages merges save_in_use_p. */ |
2164 | gcc_assert (!(pe->in_use_p[word] & (1UL << bit))); |
2165 | |
2166 | /* If the object comes from an outer context, then retain the |
2167 | free_object entry, so that we can verify that the address |
2168 | isn't live on the stack in some outer context. */ |
2169 | if (pe->context_depth != G.context_depth) |
2170 | { |
2171 | f->next = still_free; |
2172 | still_free = f; |
2173 | } |
2174 | else |
2175 | free (f); |
2176 | } |
2177 | |
2178 | G.free_object_list = still_free; |
2179 | } |
2180 | #else |
2181 | #define validate_free_objects() |
2182 | #endif |
2183 | |
2184 | /* Top level mark-and-sweep routine. */ |
2185 | |
2186 | void |
2187 | ggc_collect (enum ggc_collect mode) |
2188 | { |
2189 | /* Avoid frequent unnecessary work by skipping collection if the |
2190 | total allocations haven't expanded much since the last |
2191 | collection. */ |
2192 | float allocated_last_gc = |
2193 | MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * ONE_K); |
2194 | |
2195 | /* It is also a good time to bring the memory block pool back within its limits. */ |
2196 | memory_block_pool::trim (); |
2197 | |
2198 | float min_expand = allocated_last_gc * param_ggc_min_expand / 100; |
2199 | if (mode == GGC_COLLECT_HEURISTIC |
2200 | && G.allocated < allocated_last_gc + min_expand) |
2201 | return; |
2202 | |
2203 | timevar_push (TV_GC); |
2204 | if (GGC_DEBUG_LEVEL >= 2) |
2205 | fprintf (G.debug_file, "BEGIN COLLECTING\n"); |
2206 | |
2207 | /* Zero the total allocated bytes. This will be recalculated in the |
2208 | sweep phase. */ |
2209 | size_t allocated = G.allocated; |
2210 | G.allocated = 0; |
2211 | |
2212 | /* Release the pages we freed the last time we collected, but didn't |
2213 | reuse in the interim. */ |
2214 | release_pages (); |
2215 | |
2216 | /* Output this later so we do not interfere with release_pages. */ |
2217 | if (!quiet_flag) |
2218 | fprintf (stderr, " {GC " PRsa (0) " -> ", SIZE_AMOUNT (allocated)); |
2219 | |
2220 | /* Indicate that we've seen collections at this context depth. */ |
2221 | G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; |
2222 | |
2223 | invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); |
2224 | |
2225 | in_gc = true; |
2226 | clear_marks (); |
2227 | ggc_mark_roots (); |
2228 | ggc_handle_finalizers (); |
2229 | |
2230 | if (GATHER_STATISTICS) |
2231 | ggc_prune_overhead_list (); |
2232 | |
2233 | poison_pages (); |
2234 | validate_free_objects (); |
2235 | sweep_pages (); |
2236 | |
2237 | in_gc = false; |
2238 | G.allocated_last_gc = G.allocated; |
2239 | |
2240 | invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); |
2241 | |
2242 | timevar_pop (TV_GC); |
2243 | |
2244 | if (!quiet_flag) |
2245 | fprintf (stderr, PRsa (0) "}", SIZE_AMOUNT (G.allocated)); |
2246 | if (GGC_DEBUG_LEVEL >= 2) |
2247 | fprintf (G.debug_file, "END COLLECTING\n"); |
2248 | } |
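
/* The skip heuristic at the top of ggc_collect, in concrete numbers
   (standalone sketch, not part of this file; the values below are
   hypothetical stand-ins for --param ggc-min-heapsize and --param
   ggc-min-expand, whose real defaults vary with configuration):  */
#if 0
#include <cassert>
#include <cstddef>

int main ()
{
  size_t allocated_last_gc = 8u * 1024 * 1024;  /* survived the last GC */
  size_t min_heapsize = 4u * 1024 * 1024;       /* hypothetical floor */
  unsigned min_expand_pct = 30;                 /* hypothetical percentage */

  size_t base = allocated_last_gc > min_heapsize
                ? allocated_last_gc : min_heapsize;
  size_t threshold = base + base * min_expand_pct / 100;

  size_t allocated = 10u * 1024 * 1024;         /* current allocation total */
  assert (allocated < threshold);               /* 10 MB < ~10.4 MB: skip GC */
  return 0;
}
#endif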
2249 | |
2250 | /* Return free pages to the system. */ |
2251 | |
2252 | void |
2253 | ggc_trim () |
2254 | { |
2255 | timevar_push (TV_GC); |
2256 | G.allocated = 0; |
2257 | sweep_pages (); |
2258 | release_pages (); |
2259 | if (!quiet_flag) |
2260 | fprintf (stderr, " {GC trimmed to " PRsa (0) ", " PRsa (0) " mapped}", |
2261 | SIZE_AMOUNT (G.allocated), SIZE_AMOUNT (G.bytes_mapped)); |
2262 | timevar_pop (TV_GC); |
2263 | } |
2264 | |
2265 | /* Assume that all GGC memory is reachable and grow the limits for next |
2266 | collection. With checking, trigger GGC so -Q compilation outputs how much |
2267 | of memory really is reachable. */ |
2268 | |
2269 | void |
2270 | ggc_grow (void) |
2271 | { |
2272 | if (!flag_checking) |
2273 | G.allocated_last_gc = MAX (G.allocated_last_gc, |
2274 | G.allocated); |
2275 | else |
2276 | ggc_collect (); |
2277 | if (!quiet_flag) |
2278 | fprintf (stderr, " {GC " PRsa (0) "} ", SIZE_AMOUNT (G.allocated)); |
2279 | } |
2280 | |
2281 | void |
2282 | ggc_print_statistics (void) |
2283 | { |
2284 | struct ggc_statistics stats; |
2285 | unsigned int i; |
2286 | size_t total_overhead = 0; |
2287 | |
2288 | /* Clear the statistics. */ |
2289 | memset (&stats, 0, sizeof (stats)); |
2290 | |
2291 | /* Make sure collection will really occur. */ |
2292 | G.allocated_last_gc = 0; |
2293 | |
2294 | /* Collect and print the statistics common across collectors. */ |
2295 | ggc_print_common_statistics (stderr, &stats); |
2296 | |
2297 | /* Release free pages so that we will not count the bytes allocated |
2298 | there as part of the total allocated memory. */ |
2299 | release_pages (); |
2300 | |
2301 | /* Collect some information about the various sizes of |
2302 | allocation. */ |
2303 | fprintf (stderr, |
2304 | "Memory still allocated at the end of the compilation process\n"); |
2305 | fprintf (stderr, "%-8s %10s %10s %10s\n", |
2306 | "Size", "Allocated", "Used", "Overhead"); |
2307 | for (i = 0; i < NUM_ORDERS; ++i) |
2308 | { |
2309 | page_entry *p; |
2310 | size_t allocated; |
2311 | size_t in_use; |
2312 | size_t overhead; |
2313 | |
2314 | /* Skip empty entries. */ |
2315 | if (!G.pages[i]) |
2316 | continue; |
2317 | |
2318 | overhead = allocated = in_use = 0; |
2319 | |
2320 | /* Figure out the total number of bytes allocated for objects of |
2321 | this size, and how many of them are actually in use. Also figure |
2322 | out how much memory the page table is using. */ |
2323 | for (p = G.pages[i]; p; p = p->next) |
2324 | { |
2325 | allocated += p->bytes; |
2326 | in_use += |
2327 | (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i); |
2328 | |
2329 | overhead += (sizeof (page_entry) - sizeof (long) |
2330 | + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1)); |
2331 | } |
2332 | fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " " |
2333 | PRsa (10) "\n", |
2334 | (uint64_t)OBJECT_SIZE (i), |
2335 | SIZE_AMOUNT (allocated), |
2336 | SIZE_AMOUNT (in_use), |
2337 | SIZE_AMOUNT (overhead)); |
2338 | total_overhead += overhead; |
2339 | } |
2340 | fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n", |
2341 | "Total", |
2342 | SIZE_AMOUNT (G.bytes_mapped), |
2343 | SIZE_AMOUNT (G.allocated), |
2344 | SIZE_AMOUNT (total_overhead)); |
2345 | |
2346 | if (GATHER_STATISTICS) |
2347 | { |
2348 | fprintf (stderr, "\nTotal allocations and overheads during " |
2349 | "the compilation process\n"); |
2350 | |
2351 | fprintf (stderr, "Total Overhead: " |
2352 | PRsa (9) "\n", |
2353 | SIZE_AMOUNT (G.stats.total_overhead)); |
2354 | fprintf (stderr, "Total Allocated: " |
2355 | PRsa (9) "\n", |
2356 | SIZE_AMOUNT (G.stats.total_allocated)); |
2357 | |
2358 | fprintf (stderr, "Total Overhead under 32B: " |
2359 | PRsa (9) "\n", |
2360 | SIZE_AMOUNT (G.stats.total_overhead_under32)); |
2361 | fprintf (stderr, "Total Allocated under 32B: " |
2362 | PRsa (9) "\n", |
2363 | SIZE_AMOUNT (G.stats.total_allocated_under32)); |
2364 | fprintf (stderr, "Total Overhead under 64B: " |
2365 | PRsa (9) "\n", |
2366 | SIZE_AMOUNT (G.stats.total_overhead_under64)); |
2367 | fprintf (stderr, "Total Allocated under 64B: " |
2368 | PRsa (9) "\n", |
2369 | SIZE_AMOUNT (G.stats.total_allocated_under64)); |
2370 | fprintf (stderr, "Total Overhead under 128B: " |
2371 | PRsa (9) "\n", |
2372 | SIZE_AMOUNT (G.stats.total_overhead_under128)); |
2373 | fprintf (stderr, "Total Allocated under 128B: " |
2374 | PRsa (9) "\n", |
2375 | SIZE_AMOUNT (G.stats.total_allocated_under128)); |
2376 | |
2377 | for (i = 0; i < NUM_ORDERS; i++) |
2378 | if (G.stats.total_allocated_per_order[i]) |
2379 | { |
2380 | fprintf (stderr, "Total Overhead page size %9" PRIu64 ": " |
2381 | PRsa (9) "\n", |
2382 | (uint64_t)OBJECT_SIZE (i), |
2383 | SIZE_AMOUNT (G.stats.total_overhead_per_order[i])); |
2384 | fprintf (stderr, "Total Allocated page size %9" PRIu64 ": " |
2385 | PRsa (9) "\n", |
2386 | (uint64_t)OBJECT_SIZE (i), |
2387 | SIZE_AMOUNT (G.stats.total_allocated_per_order[i])); |
2388 | } |
2389 | } |
2390 | } |
2391 | |
2392 | struct ggc_pch_ondisk |
2393 | { |
2394 | unsigned totals[NUM_ORDERS]; |
2395 | }; |
2396 | |
2397 | struct ggc_pch_data |
2398 | { |
2399 | struct ggc_pch_ondisk d; |
2400 | uintptr_t base[NUM_ORDERS]; |
2401 | size_t written[NUM_ORDERS]; |
2402 | }; |
2403 | |
2404 | struct ggc_pch_data * |
2405 | init_ggc_pch (void) |
2406 | { |
2407 | return XCNEW (struct ggc_pch_data); |
2408 | } |
2409 | |
2410 | void |
2411 | ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, |
2412 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
2413 | { |
2414 | unsigned order; |
2415 | |
2416 | if (size < NUM_SIZE_LOOKUP) |
2417 | order = size_lookup[size]; |
2418 | else |
2419 | { |
2420 | order = 10; |
2421 | while (size > OBJECT_SIZE (order)) |
2422 | order++; |
2423 | } |
2424 | |
2425 | d->d.totals[order]++; |
2426 | } |
2427 | |
2428 | size_t |
2429 | ggc_pch_total_size (struct ggc_pch_data *d) |
2430 | { |
2431 | size_t a = 0; |
2432 | unsigned i; |
2433 | |
2434 | for (i = 0; i < NUM_ORDERS; i++) |
2435 | a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i)); |
2436 | return a; |
2437 | } |
2438 | |
2439 | void |
2440 | ggc_pch_this_base (struct ggc_pch_data *d, void *base) |
2441 | { |
2442 | uintptr_t a = (uintptr_t) base; |
2443 | unsigned i; |
2444 | |
2445 | for (i = 0; i < NUM_ORDERS; i++) |
2446 | { |
2447 | d->base[i] = a; |
2448 | a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i)); |
2449 | } |
2450 | } |
2451 | |
2452 | |
2453 | char * |
2454 | ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, |
2455 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
2456 | { |
2457 | unsigned order; |
2458 | char *result; |
2459 | |
2460 | if (size < NUM_SIZE_LOOKUP) |
2461 | order = size_lookup[size]; |
2462 | else |
2463 | { |
2464 | order = 10; |
2465 | while (size > OBJECT_SIZE (order)) |
2466 | order++; |
2467 | } |
2468 | |
2469 | result = (char *) d->base[order]; |
2470 | d->base[order] += OBJECT_SIZE (order); |
2471 | return result; |
2472 | } |
2473 | |
2474 | void |
2475 | ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED, |
2476 | FILE *f ATTRIBUTE_UNUSED) |
2477 | { |
2478 | /* Nothing to do. */ |
2479 | } |
2480 | |
2481 | void |
2482 | ggc_pch_write_object (struct ggc_pch_data *d, |
2483 | FILE *f, void *x, void *newx ATTRIBUTE_UNUSED, |
2484 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
2485 | { |
2486 | unsigned order; |
2487 | static const char emptyBytes[256] = { 0 }; |
2488 | |
2489 | if (size < NUM_SIZE_LOOKUP) |
2490 | order = size_lookup[size]; |
2491 | else |
2492 | { |
2493 | order = 10; |
2494 | while (size > OBJECT_SIZE (order)) |
2495 | order++; |
2496 | } |
2497 | |
2498 | if (fwrite (x, size, 1, f) != 1) |
2499 | fatal_error (input_location, "cannot write PCH file: %m"); |
2500 | |
2501 | /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the |
2502 | object out to OBJECT_SIZE(order). This happens for strings. */ |
2503 | |
2504 | if (size != OBJECT_SIZE (order)) |
2505 | { |
2506 | unsigned padding = OBJECT_SIZE (order) - size; |
2507 | |
2508 | /* To speed small writes, we use a nulled-out array that's larger |
2509 | than most padding requests as the source for our null bytes. This |
2510 | permits us to do the padding with fwrite() rather than fseek(), and |
2511 | limits the chance the OS may try to flush any outstanding writes. */ |
2512 | if (padding <= sizeof (emptyBytes)) |
2513 | { |
2514 | if (fwrite (emptyBytes, 1, padding, f) != padding) |
2515 | fatal_error (input_location, "cannot write PCH file"); |
2516 | } |
2517 | else |
2518 | { |
2519 | /* Larger than our buffer? Just default to fseek. */ |
2520 | if (fseek (f, padding, SEEK_CUR) != 0) |
2521 | fatal_error (input_location, "cannot write PCH file"); |
2522 | } |
2523 | } |
2524 | |
2525 | d->written[order]++; |
2526 | if (d->written[order] == d->d.totals[order] |
2527 | && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order), |
2528 | G.pagesize), |
2529 | SEEK_CUR) != 0) |
2530 | fatal_error (input_location, "cannot write PCH file: %m"); |
2531 | } |
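
/* The padding logic above prefers a small buffered write over a seek.
   A standalone sketch of the same trade-off (not part of this file;
   pad_stream is a hypothetical helper):  */
#if 0
#include <cstdio>

/* Pad OUT with COUNT zero bytes: small pads come from a zeroed buffer
   via fwrite, larger ones fall back to fseek.  */
static int
pad_stream (FILE *out, long count)
{
  static const char zeros[256] = { 0 };
  if (count <= (long) sizeof (zeros))
    return fwrite (zeros, 1, count, out) == (size_t) count ? 0 : -1;
  return fseek (out, count, SEEK_CUR);
}

int main ()
{
  FILE *f = tmpfile ();
  if (!f)
    return 1;
  fputs ("object bytes", f);
  pad_stream (f, 20);      /* small pad: written as zeros */
  pad_stream (f, 4096);    /* large pad: seeked over */
  fclose (f);
  return 0;
}
#endif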
2532 | |
2533 | void |
2534 | ggc_pch_finish (struct ggc_pch_data *d, FILE *f) |
2535 | { |
2536 | if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) |
2537 | fatal_error (input_location, "cannot write PCH file: %m"); |
2538 | free (d); |
2539 | } |
2540 | |
2541 | /* Move the PCH PTE entries just added to the end of by_depth, to the |
2542 | front. */ |
2543 | |
2544 | static void |
2545 | move_ptes_to_front (int count_old_page_tables, int count_new_page_tables) |
2546 | { |
2547 | /* First, we swap the new entries to the front of the varrays. */ |
2548 | page_entry **new_by_depth; |
2549 | unsigned long **new_save_in_use; |
2550 | |
2551 | new_by_depth = XNEWVEC (page_entry *, G.by_depth_max); |
2552 | new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); |
2553 | |
2554 | memcpy (&new_by_depth[0], |
2555 | &G.by_depth[count_old_page_tables], |
2556 | count_new_page_tables * sizeof (void *)); |
2557 | memcpy (&new_by_depth[count_new_page_tables], |
2558 | &G.by_depth[0], |
2559 | count_old_page_tables * sizeof (void *)); |
2560 | memcpy (&new_save_in_use[0], |
2561 | &G.save_in_use[count_old_page_tables], |
2562 | count_new_page_tables * sizeof (void *)); |
2563 | memcpy (&new_save_in_use[count_new_page_tables], |
2564 | &G.save_in_use[0], |
2565 | count_old_page_tables * sizeof (void *)); |
2566 | |
2567 | free (G.by_depth); |
2568 | free (G.save_in_use); |
2569 | |
2570 | G.by_depth = new_by_depth; |
2571 | G.save_in_use = new_save_in_use; |
2572 | |
2573 | /* Now update all the index_by_depth fields. */ |
2574 | for (unsigned i = G.by_depth_in_use; i--;) |
2575 | { |
2576 | page_entry *p = G.by_depth[i]; |
2577 | p->index_by_depth = i; |
2578 | } |
2579 | |
2580 | /* And last, we update the depth pointers in G.depth. The first |
2581 | entry is already 0, and context 0 entries always start at index |
2582 | 0, so there is nothing to update in the first slot. We need a |
2583 | second slot only if we have old ptes, and if we do, they start |
2584 | at index count_new_page_tables. */ |
2585 | if (count_old_page_tables) |
2586 | push_depth (count_new_page_tables); |
2587 | } |
2588 | |
2589 | void |
2590 | ggc_pch_read (FILE *f, void *addr) |
2591 | { |
2592 | struct ggc_pch_ondisk d; |
2593 | unsigned i; |
2594 | char *offs = (char *) addr; |
2595 | unsigned long count_old_page_tables; |
2596 | unsigned long count_new_page_tables; |
2597 | |
2598 | count_old_page_tables = G.by_depth_in_use; |
2599 | |
2600 | if (fread (&d, sizeof (d), 1, f) != 1) |
2601 | fatal_error (input_location, "cannot read PCH file: %m"); |
2602 | |
2603 | /* We've just read in a PCH file. So, every object that used to be |
2604 | allocated is now free. */ |
2605 | clear_marks (); |
2606 | #ifdef ENABLE_GC_CHECKING |
2607 | poison_pages (); |
2608 | #endif |
2609 | /* Since we free all the allocated objects, the free list becomes |
2610 | useless. Validate it now, which will also clear it. */ |
2611 | validate_free_objects (); |
2612 | |
2613 | /* No object read from a PCH file should ever be freed. So, set the |
2614 | context depth to 1, and set the depth of all the currently-allocated |
2615 | pages to be 1 too. PCH pages will have depth 0. */ |
2616 | gcc_assert (!G.context_depth); |
2617 | G.context_depth = 1; |
2618 | /* Allocate space for the depth 1 finalizers. */ |
2619 | G.finalizers.safe_push (vNULL); |
2620 | G.vec_finalizers.safe_push (vNULL); |
2621 | gcc_assert (G.finalizers.length() == 2); |
2622 | for (i = 0; i < NUM_ORDERS; i++) |
2623 | { |
2624 | page_entry *p; |
2625 | for (p = G.pages[i]; p != NULL; p = p->next) |
2626 | p->context_depth = G.context_depth; |
2627 | } |
2628 | |
2629 | /* Allocate the appropriate page-table entries for the pages read from |
2630 | the PCH file. */ |
2631 | |
2632 | for (i = 0; i < NUM_ORDERS; i++) |
2633 | { |
2634 | struct page_entry *entry; |
2635 | char *pte; |
2636 | size_t bytes; |
2637 | size_t num_objs; |
2638 | size_t j; |
2639 | |
2640 | if (d.totals[i] == 0) |
2641 | continue; |
2642 | |
2643 | bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i)); |
2644 | num_objs = bytes / OBJECT_SIZE (i); |
2645 | entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry) |
2646 | - sizeof (long) |
2647 | + BITMAP_SIZE (num_objs + 1))); |
2648 | entry->bytes = bytes; |
2649 | entry->page = offs; |
2650 | entry->context_depth = 0; |
2651 | offs += bytes; |
2652 | entry->num_free_objects = 0; |
2653 | entry->order = i; |
2654 | |
2655 | for (j = 0; |
2656 | j + HOST_BITS_PER_LONG <= num_objs + 1; |
2657 | j += HOST_BITS_PER_LONG) |
2658 | entry->in_use_p[j / HOST_BITS_PER_LONG] = -1; |
2659 | for (; j < num_objs + 1; j++) |
2660 | entry->in_use_p[j / HOST_BITS_PER_LONG] |
2661 | |= 1L << (j % HOST_BITS_PER_LONG); |
2662 | |
2663 | for (pte = entry->page; |
2664 | pte < entry->page + entry->bytes; |
2665 | pte += G.pagesize) |
2666 | set_page_table_entry (pte, entry); |
2667 | |
2668 | if (G.page_tails[i] != NULL) |
2669 | G.page_tails[i]->next = entry; |
2670 | else |
2671 | G.pages[i] = entry; |
2672 | G.page_tails[i] = entry; |
2673 | |
2674 | /* We start off by just adding all the new information to the |
2675 | end of the varrays; later, we will move the new information |
2676 | to the front of the varrays, since the PCH page tables are at |
2677 | context 0. */ |
2678 | push_by_depth (entry, 0); |
2679 | } |
2680 | |
2681 | /* Now, we update the various data structures that speed page table |
2682 | handling. */ |
2683 | count_new_page_tables = G.by_depth_in_use - count_old_page_tables; |
2684 | |
2685 | move_ptes_to_front (count_old_page_tables, count_new_page_tables); |
2686 | |
2687 | /* Update the statistics. */ |
2688 | G.allocated = G.allocated_last_gc = offs - (char *)addr; |
2689 | } |