/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
    /* Largest block we will allocate starting on a black-  */
    /* listed block.  Must be >= HBLKSIZE.                   */

# define UNIQUE_THRESHOLD 32
    /* Sizes up to this many HBLKs each have their own free list.  */
# define HUGE_THRESHOLD 256
    /* Sizes of at least this many heap blocks are mapped to a     */
    /* single free list.                                           */
# define FL_COMPRESSION 8
    /* In-between sizes map this many distinct sizes to a single   */
    /* bin.                                                        */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                     + UNIQUE_THRESHOLD
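
/* With the default values above, N_HBLK_FLS = (256 - 32)/8 + 32 = 60: */
/* one list for each block count from 1 to 32, roughly one per group  */
/* of 8 counts from 33 to 255, and a final list (index 60) for        */
/* requests of 256 or more blocks.                                    */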

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
    /* Number of free bytes on each list. */

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
  /* > GC_max_large_allocd_bytes?                                 */
# ifdef __GNUC__
  __inline__
# endif
  static GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                    + UNIQUE_THRESHOLD;
}
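
/* Examples with the defaults above: a 5-block request maps to index 5 */
/* (its own list); a 100-block request maps to (100 - 32)/8 + 32 = 40; */
/* a 500-block request maps to N_HBLK_FLS (60).                        */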

# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
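
/* IS_MAPPED guards every access to the contents of a free block:   */
/* with USE_MUNMAP a free block may have been returned to the OS,   */
/* and must be remapped before its contents are touched.            */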

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf1("Free list %ld:\n",
                               (unsigned long)i);
#     else
        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                               (unsigned long)i,
                               (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf0("partially black listed\n");
        } else {
             GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}
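
/* This is a linear scan of every free block, so it is only intended */
/* for the debugging dumps in this file.                             */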

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf2("***Section from 0x%lx to 0x%lx\n",
                   (unsigned long)start, (unsigned long)end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf0("Missing header!!\n");
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0(" (unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               (unsigned long)correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               (unsigned long)actual_index,
                               (unsigned long)correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and */
/* kind of objects.                                              */
/* Return FALSE on failure.                                      */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers. */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields. */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits. */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
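
/* Note: for object kinds whose descriptor encodes the object length */
/* (ok_relocate_descr set, e.g. the default NORMAL kind), the base   */
/* descriptor is biased by the object size in bytes above, so one    */
/* per-kind template serves all object sizes.                        */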

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts. */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
            index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    FREE_ASSERT(GC_free_bytes[index] >= 0);
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if(HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}
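
/* The forwarding loop above walks from the last HBLK of the          */
/* preceding object back to its first, since interior blocks of a     */
/* multi-block object carry forwarding counts rather than real        */
/* headers.                                                           */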

/*
 * Add hhdr to the appropriate free list.
 * The block is pushed on the head of the list; the individual free
 * lists are not maintained in address order.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
        GET_HDR(second, second_hdr);
        second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only */
/* way blocks are ever unmapped.                                      */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
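        /* Unmap the block unless it was reclaimed within the last     */
        /* UNMAP_THRESHOLD collections.  hb_last_reclaimed is only 16  */
        /* bits, so the test also treats a stamp apparently in the     */
        /* future (GC_gc_no wrapped past it) as old.  E.g. with        */
        /* GC_gc_no == 100, threshold == 94: a block stamped 90 is     */
        /* unmapped, one stamped 97 is kept, and one stamped 60000 is  */
        /* considered stale and unmapped too.                          */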
        if (last_rec > GC_gc_no
            || (last_rec < threshold && threshold < GC_gc_no)
               /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}

/* Merge all unmapped blocks that are adjacent to other free blocks.  */
/* This may involve remapping, since all blocks are either fully      */
/* mapped or fully unmapped.                                          */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible. */
        if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
          nextsize = nexthdr -> hb_sz;
          if (IS_MAPPED(hhdr)) {
            GC_ASSERT(!IS_MAPPED(nexthdr));
            /* Make both consistent, so that we can merge. */
            if (size > nextsize) {
              GC_remap((ptr_t)next, nextsize);
            } else {
              GC_unmap((ptr_t)h, size);
              hhdr -> hb_flags |= WAS_UNMAPPED;
            }
          } else if (IS_MAPPED(nexthdr)) {
            GC_ASSERT(!IS_MAPPED(hhdr));
            if (size > nextsize) {
              GC_unmap((ptr_t)next, nextsize);
            } else {
              GC_remap((ptr_t)h, size);
              hhdr -> hb_flags &= ~WAS_UNMAPPED;
            }
          } else {
            /* Unmap any gap in the middle. */
            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
          }
          /* If they are both unmapped, we merge, but leave unmapped. */
          GC_remove_from_fl(hhdr, i);
          GC_remove_from_fl(nexthdr, FL_UNKNOWN);
          hhdr -> hb_sz += nexthdr -> hb_sz;
          GC_remove_header(next);
          GC_add_to_fl(h, hhdr);
          /* Start over at the beginning of the list. */
          h = GC_hblkfreelist[i];
        } else /* not mergeable with successor */ {
          h = hhdr -> hb_next;
        }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
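
/* Note that the remainder goes back on a (generally different) free  */
/* list, so one large free block can service a whole sequence of      */
/* smaller requests without ever being rebuilt from scratch.          */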

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist. */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (0 != prev) {
        HDR(prev) -> hb_next = n;
    } else {
        GC_hblkfreelist[index] = n;
    }
    if (0 != next) {
        HDR(next) -> hb_prev = n;
    }
    INCR_FREE_BYTES(index, -(signed_word)h_size);
    FREE_ASSERT(GC_free_bytes[index] > 0);
#   ifdef GC_ASSERTIONS
      nhdr -> hb_map = 0;       /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.    */
#   endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}
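
/* Resulting layout (sizes in bytes):                                 */
/*   [h .. h+h_size-1]        stays free; moved to the list for       */
/*                            h_size.                                 */
/*   [n .. h+total_size-1]    takes h's place on list "index" and     */
/*                            is about to be allocated.               */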

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz words, searching the appropriate free
 * lists in order of increasing size class.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collector, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) {
            return result;
        }
    }
    return 0;
}
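
/* Illustrative use (hypothetical caller): allocate a block for 128   */
/* word objects of kind NORMAL, retrying after a collection if no     */
/* block is available:                                                */
/*                                                                    */
/*     struct hblk *h = GC_allochblk((word)128, NORMAL, 0);           */
/*     if (0 == h) ... collect or expand the heap, then retry ...     */
/*                                                                    */
/* The caller must then build the object free list within the block.  */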

/*
 * The same, but with search restricted to the nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;            /* Header corr. to hbp     */
    register struct hblk *thishbp;
    register hdr * thishdr;         /* Header corr. to thishbp */
    signed_word size_needed;        /* number of bytes in requested objects */
    signed_word size_avail;         /* bytes available in this block */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* Search for a big enough block in free list. */
        hbp = GC_hblkfreelist[n];
        for(; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (!GC_use_entire_heap
                && size_avail != size_needed
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !TRUE_INCREMENTAL && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                    /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
                    /* heap if we allocate only small objects.          */
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                        continue;
                    }
                    /* If we are deallocating lots of memory from */
                    /* finalizers, fail and collect sooner rather */
                    /* than later.                                */
                    if (GC_finalizer_mem_freed > (GC_heapsize >> 4)) {
                        continue;
                    }
#               endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              /* Skip past any black listed prefix of the block. */
              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                            (word)eff_size_needed))
                        != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp. */
                  GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp. */
                  hbp = thishbp;
                  hhdr = thishdr;
                  /* We must now allocate thishbp, since it may */
                  /* be on the wrong free list.                 */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                  WARN("Repeated allocation of very large block "
                       "(appr. size %ld):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                       size_needed);
                  GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointerfree        */
                  /* blocks are unpopular.                              */
                  /* A dropped block will be reconsidered at next GC.   */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to */
                    /* maximize the chance that we will recover some   */
                    /* later.                                           */
                    word total_size = hhdr -> hb_sz;
                    struct hblk * limit = hbp + divHBLKSZ(total_size);
                    struct hblk * h;
                    struct hblk * prev = hhdr -> hb_prev;

                    GC_words_wasted += total_size;
                    GC_large_free_bytes -= total_size;
                    GC_remove_from_fl(hhdr, n);
                    for (h = hbp; h < limit; h++) {
                      if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                        (void) setup_header(
                                  hhdr,
                                  BYTES_TO_WORDS(HBLKSIZE),
                                  PTRFREE, 0); /* Can't fail. */
                        if (GC_debugging_started) {
                          BZERO(h, HBLKSIZE);
                        }
                      }
                    }
                    /* Restore hbp to point at the free block. */
                    hbp = prev;
                    if (0 == hbp) {
                      return GC_allochblk_nth(sz, kind, flags, n);
                    }
                    hhdr = HDR(hbp);
                  }
                }
              }
            }
            if( size_avail >= size_needed ) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n */
                /* is important.                                     */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks. */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header. */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
        GC_remove_protection(hbp, divHBLKSZ(size_needed),
                             (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
    {
        extern unsigned GC_fail_count;

        GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case. */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
        ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible. */
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
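
/* E.g. freeing a block with mapped free blocks on both sides first   */
/* absorbs the successor into hbp, then lets the predecessor absorb   */
/* hbp, so a single fully coalesced block goes back on the free list. */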