Mercurial > emacs
comparison gc/include/gc.h @ 51488:5de98dce4bd1
*** empty log message ***
author | Dave Love <fx@gnu.org> |
---|---|
date | Thu, 05 Jun 2003 17:49:22 +0000 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
51487:01d68b199093 | 51488:5de98dce4bd1 |
---|---|
1 /* | |
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers | |
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. | |
4 * Copyright 1996-1999 by Silicon Graphics. All rights reserved. | |
5 * Copyright 1999 by Hewlett-Packard Company. All rights reserved. | |
6 * | |
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED | |
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. | |
9 * | |
10 * Permission is hereby granted to use or copy this program | |
11 * for any purpose, provided the above notices are retained on all copies. | |
12 * Permission to modify the code and to distribute modified code is granted, | |
13 * provided the above notices are retained, and a notice that the code was | |
14 * modified is included with the above copyright notice. | |
15 */ | |
16 | |
17 /* | |
18 * Note that this defines a large number of tuning hooks, which can | |
19 * safely be ignored in nearly all cases. For normal use it suffices | |
20 * to call only GC_MALLOC and perhaps GC_REALLOC. | |
21 * For better performance, also look at GC_MALLOC_ATOMIC, and | |
22 * GC_enable_incremental. If you need an action to be performed | |
23 * immediately before an object is collected, look at GC_register_finalizer. | |
24 * If you are using Solaris threads, look at the end of this file. | |
25 * Everything else is best ignored unless you encounter performance | |
26 * problems. | |
27 */ | |
28 | |
29 #ifndef _GC_H | |
30 | |
31 # define _GC_H | |
32 | |
33 # include "gc_config_macros.h" | |
34 | |
35 # if defined(__STDC__) || defined(__cplusplus) | |
36 # define GC_PROTO(args) args | |
37 typedef void * GC_PTR; | |
38 # define GC_CONST const | |
39 # else | |
40 # define GC_PROTO(args) () | |
41 typedef char * GC_PTR; | |
42 # define GC_CONST | |
43 # endif | |
44 | |
45 # ifdef __cplusplus | |
46 extern "C" { | |
47 # endif | |
48 | |
49 | |
50 /* Define word and signed_word to be unsigned and signed types of the */ | |
51 /* same size as char * or void *. There seems to be no way to do this */ | |
52 /* even semi-portably. The following is probably no better/worse */ | |
53 /* than almost anything else. */ | |
54 /* The ANSI standard suggests that size_t and ptrdiff_t might be */ | |
55 /* better choices. But those appear to have incorrect definitions */ | |
56 /* on many systems. Notably "typedef int size_t" seems to be both */ | |
57 /* frequent and WRONG. */ | |
58 typedef unsigned long GC_word; | |
59 typedef long GC_signed_word; | |
60 | |
61 /* Public read-only variables */ | |
62 | |
63 GC_API GC_word GC_gc_no;/* Counter incremented per collection. */ | |
64 /* Includes empty GCs at startup. */ | |
65 | |
66 GC_API int GC_parallel; /* GC is parallelized for performance on */ | |
67 /* multiprocessors. Currently set only */ | |
68 /* implicitly if collector is built with */ | |
69 /* -DPARALLEL_MARK and if either: */ | |
70 /* Env variable GC_NPROC is set to > 1, or */ | |
71 /* GC_NPROC is not set and this is an MP. */ | |
72 /* If GC_parallel is set, incremental */ | |
73 /* collection is only partially functional, */ | |
74 /* and may not be desirable. */ | |
75 | |
76 | |
77 /* Public R/W variables */ | |
78 | |
79 GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested)); | |
80 /* When there is insufficient memory to satisfy */ | |
81 /* an allocation request, we return */ | |
82 /* (*GC_oom_fn)(). By default this just */ | |
83 /* returns 0. */ | |
84 /* If it returns, it must return 0 or a valid */ | |
85 /* pointer to a previously allocated heap */ | |
86 /* object. */ | |
87 | |
88 GC_API int GC_find_leak; | |
89 /* Do not actually garbage collect, but simply */ | |
90 /* report inaccessible memory that was not */ | |
91 /* deallocated with GC_free. Initial value */ | |
92 /* is determined by FIND_LEAK macro. */ | |
93 | |
94 GC_API int GC_all_interior_pointers; | |
95 /* Arrange for pointers to object interiors to */ | |
96 /* be recognized as valid. May not be changed */ | |
97 /* after GC initialization. */ | |
98 /* Initial value is determined by */ | |
99 /* -DALL_INTERIOR_POINTERS. */ | |
100 /* Unless DONT_ADD_BYTE_AT_END is defined, this */ | |
101 /* also affects whether sizes are increased by */ | |
102 /* at least a byte to allow "off the end" */ | |
103 /* pointer recognition. */ | |
104 /* MUST BE 0 or 1. */ | |
105 | |
106 GC_API int GC_quiet; /* Disable statistics output. Only matters if */ | |
107 /* collector has been compiled with statistics */ | |
108 /* enabled. This involves a performance cost, */ | |
109 /* and is thus not the default. */ | |
110 | |
111 GC_API int GC_finalize_on_demand; | |
112 /* If nonzero, finalizers will only be run in */ | |
113 /* response to an explicit GC_invoke_finalizers */ | |
114 /* call. The default is determined by whether */ | |
115 /* the FINALIZE_ON_DEMAND macro is defined */ | |
116 /* when the collector is built. */ | |
117 | |
118 GC_API int GC_java_finalization; | |
119 /* Mark objects reachable from finalizable */ | |
120 /* objects in a separate postpass. This makes */ | |
121 /* it a bit safer to use non-topologically- */ | |
122 /* ordered finalization. Default value is */ | |
123 /* determined by JAVA_FINALIZATION macro. */ | |
124 | |
125 GC_API void (* GC_finalizer_notifier)(); | |
126 /* Invoked by the collector when there are */ | |
127 /* objects to be finalized. Invoked at most */ | |
128 /* once per GC cycle. Never invoked unless */ | |
129 /* GC_finalize_on_demand is set. */ | |
130 /* Typically this will notify a finalization */ | |
131 /* thread, which will call GC_invoke_finalizers */ | |
132 /* in response. */ | |
133 | |
134 GC_API int GC_dont_gc; /* != 0 ==> Dont collect. In versions 7.2a1+, */ | |
135 /* this overrides explicit GC_gcollect() calls. */ | |
136 /* Used as a counter, so that nested enabling */ | |
137 /* and disabling work correctly. Should */ | |
138 /* normally be updated with GC_enable() and */ | |
139 /* GC_disable() calls. */ | |
140 /* Direct assignment to GC_dont_gc is */ | |
141 /* deprecated. */ | |
142 | |
143 GC_API int GC_dont_expand; | |
144 /* Dont expand heap unless explicitly requested */ | |
145 /* or forced to. */ | |
146 | |
147 GC_API int GC_use_entire_heap; | |
148 /* Causes the nonincremental collector to use the */ | |
149 /* entire heap before collecting. This was the only */ | |
150 /* option for GC versions < 5.0. This sometimes */ | |
151 /* results in more large block fragmentation, since */ | |
152 /* very large blocks will tend to get broken up */ | |
153 /* during each GC cycle. It is likely to result in a */ | |
154 /* larger working set, but lower collection */ | |
155 /* frequencies, and hence fewer instructions executed */ | |
156 /* in the collector. */ | |
157 | |
158 GC_API int GC_full_freq; /* Number of partial collections between */ | |
159 /* full collections. Matters only if */ | |
160 /* GC_incremental is set. */ | |
161 /* Full collections are also triggered if */ | |
162 /* the collector detects a substantial */ | |
163 /* increase in the number of in-use heap */ | |
164 /* blocks. Values in the tens are now */ | |
165 /* perfectly reasonable, unlike for */ | |
166 /* earlier GC versions. */ | |
167 | |
168 GC_API GC_word GC_non_gc_bytes; | |
169 /* Bytes not considered candidates for collection. */ | |
170 /* Used only to control scheduling of collections. */ | |
171 /* Updated by GC_malloc_uncollectable and GC_free. */ | |
172 /* Wizards only. */ | |
173 | |
174 GC_API int GC_no_dls; | |
175 /* Don't register dynamic library data segments. */ | |
176 /* Wizards only. Should be used only if the */ | |
177 /* application explicitly registers all roots. */ | |
178 /* In Microsoft Windows environments, this will */ | |
179 /* usually also prevent registration of the */ | |
180 /* main data segment as part of the root set. */ | |
181 | |
182 GC_API GC_word GC_free_space_divisor; | |
183 /* We try to make sure that we allocate at */ | |
184 /* least N/GC_free_space_divisor bytes between */ | |
185 /* collections, where N is the heap size plus */ | |
186 /* a rough estimate of the root set size. */ | |
187 /* Initially, GC_free_space_divisor = 4. */ | |
188 /* Increasing its value will use less space */ | |
189 /* but more collection time. Decreasing it */ | |
190 /* will appreciably decrease collection time */ | |
191 /* at the expense of space. */ | |
192 /* GC_free_space_divisor = 1 will effectively */ | |
193 /* disable collections. */ | |
194 | |
195 GC_API GC_word GC_max_retries; | |
196 /* The maximum number of GCs attempted before */ | |
197 /* reporting out of memory after heap */ | |
198 /* expansion fails. Initially 0. */ | |
199 | |
200 | |
201 GC_API char *GC_stackbottom; /* Cool end of user stack. */ | |
202 /* May be set in the client prior to */ | |
203 /* calling any GC_ routines. This */ | |
204 /* avoids some overhead, and */ | |
205 /* potentially some signals that can */ | |
206 /* confuse debuggers. Otherwise the */ | |
207 /* collector attempts to set it */ | |
208 /* automatically. */ | |
209 /* For multithreaded code, this is the */ | |
210 /* cold end of the stack for the */ | |
211 /* primordial thread. */ | |
212 | |
213 GC_API int GC_dont_precollect; /* Don't collect as part of */ | |
214 /* initialization. Should be set only */ | |
215 /* if the client wants a chance to */ | |
216 /* manually initialize the root set */ | |
217 /* before the first collection. */ | |
218 /* Interferes with blacklisting. */ | |
219 /* Wizards only. */ | |
220 | |
221 GC_API unsigned long GC_time_limit; | |
222 /* If incremental collection is enabled, */ | |
223 /* we try to terminate collections */ | |
224 /* after this many milliseconds. Not a */ | |
225 /* hard time bound. Setting this to */ | |
226 /* GC_TIME_UNLIMITED will essentially */ | |
227 /* disable incremental collection while */ | |
228 /* leaving generational collection */ | |
229 /* enabled. */ | |
230 # define GC_TIME_UNLIMITED 999999 | |
231 /* Setting GC_time_limit to this value */ | |
232 /* will disable the "pause time exceeded"*/ | |
233 /* tests. */ | |
234 | |
235 /* Public procedures */ | |
236 | |
237 /* Initialize the collector. This is only required when using thread-local | |
238 * allocation, since unlike the regular allocation routines, GC_local_malloc | |
239 * is not self-initializing. If you use GC_local_malloc you should arrange | |
240 * to call this somehow (e.g. from a constructor) before doing any allocation. | |
241 */ | |
242 GC_API void GC_init GC_PROTO((void)); | |
243 | |
244 /* | |
245 * general purpose allocation routines, with roughly malloc calling conv. | |
246 * The atomic versions promise that no relevant pointers are contained | |
247 * in the object. The nonatomic versions guarantee that the new object | |
248 * is cleared. GC_malloc_stubborn promises that no changes to the object | |
249 * will occur after GC_end_stubborn_change has been called on the | |
250 * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object | |
251 * that is scanned for pointers to collectable objects, but is not itself | |
252 * collectable. The object is scanned even if it does not appear to | |
253 * be reachable. GC_malloc_uncollectable and GC_free called on the resulting | |
254 * object implicitly update GC_non_gc_bytes appropriately. | |
255 * | |
256 * Note that the GC_malloc_stubborn support is stubbed out by default | |
257 * starting in 6.0. GC_malloc_stubborn is an alias for GC_malloc unless | |
258 * the collector is built with STUBBORN_ALLOC defined. | |
259 */ | |
260 GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes)); | |
261 GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes)); | |
262 GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes)); | |
263 GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes)); | |
264 | |
265 /* The following is only defined if the library has been suitably */ | |
266 /* compiled: */ | |
267 GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes)); | |
268 | |
269 /* Explicitly deallocate an object. Dangerous if used incorrectly. */ | |
270 /* Requires a pointer to the base of an object. */ | |
271 /* If the argument is stubborn, it should not be changeable when freed. */ | |
272 /* An object should not be enabled for finalization when it is */ | |
273 /* explicitly deallocated. */ | |
274 /* GC_free(0) is a no-op, as required by ANSI C for free. */ | |
275 GC_API void GC_free GC_PROTO((GC_PTR object_addr)); | |
276 | |
277 /* | |
278 * Stubborn objects may be changed only if the collector is explicitly informed. | |
279 * The collector is implicitly informed of coming change when such | |
280 * an object is first allocated. The following routines inform the | |
281 * collector that an object will no longer be changed, or that it will | |
282 * once again be changed. Only nonNIL pointer stores into the object | |
283 * are considered to be changes. The argument to GC_end_stubborn_change | |
284 * must be exactly the value returned by GC_malloc_stubborn or passed to | |
285 * GC_change_stubborn. (In the second case it may be an interior pointer | |
286 * within 512 bytes of the beginning of the objects.) | |
287 * There is a performance penalty for allowing more than | |
288 * one stubborn object to be changed at once, but it is acceptable to | |
289 * do so. The same applies to dropping stubborn objects that are still | |
290 * changeable. | |
291 */ | |
292 GC_API void GC_change_stubborn GC_PROTO((GC_PTR)); | |
293 GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR)); | |
294 | |
295 /* Return a pointer to the base (lowest address) of an object given */ | |
296 /* a pointer to a location within the object. */ | |
297 /* I.e. map an interior pointer to the corresponding base pointer. */ | |
298 /* Note that with debugging allocation, this returns a pointer to the */ | |
299 /* actual base of the object, i.e. the debug information, not to */ | |
300 /* the base of the user object. */ | |
301 /* Return 0 if displaced_pointer doesn't point to within a valid */ | |
302 /* object. */ | |
303 GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer)); | |
304 | |
305 /* Given a pointer to the base of an object, return its size in bytes. */ | |
306 /* The returned size may be slightly larger than what was originally */ | |
307 /* requested. */ | |
308 GC_API size_t GC_size GC_PROTO((GC_PTR object_addr)); | |
309 | |
310 /* For compatibility with C library. This is occasionally faster than */ | |
311 /* a malloc followed by a bcopy. But if you rely on that, either here */ | |
312 /* or with the standard C library, your code is broken. In my */ | |
313 /* opinion, it shouldn't have been invented, but now we're stuck. -HB */ | |
314 /* The resulting object has the same kind as the original. */ | |
315 /* If the argument is stubborn, the result will have changes enabled. */ | |
316 /* It is an error to have changes enabled for the original object. */ | |
317 /* Follows ANSI conventions for NULL old_object. */ | |
318 GC_API GC_PTR GC_realloc | |
319 GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes)); | |
320 | |
321 /* Explicitly increase the heap size. */ | |
322 /* Returns 0 on failure, 1 on success. */ | |
323 GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes)); | |
324 | |
325 /* Limit the heap size to n bytes. Useful when you're debugging, */ | |
326 /* especially on systems that don't handle running out of memory well. */ | |
327 /* n == 0 ==> unbounded. This is the default. */ | |
328 GC_API void GC_set_max_heap_size GC_PROTO((GC_word n)); | |
329 | |
330 /* Inform the collector that a certain section of statically allocated */ | |
331 /* memory contains no pointers to garbage collected memory. Thus it */ | |
332 /* need not be scanned. This is sometimes important if the application */ | |
333 /* maps large read/write files into the address space, which could be */ | |
334 /* mistaken for dynamic library data segments on some systems. */ | |
335 GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish)); | |
336 | |
337 /* Clear the set of root segments. Wizards only. */ | |
338 GC_API void GC_clear_roots GC_PROTO((void)); | |
339 | |
340 /* Add a root segment. Wizards only. */ | |
341 GC_API void GC_add_roots GC_PROTO((char * low_address, | |
342 char * high_address_plus_1)); | |
343 | |
344 /* Add a displacement to the set of those considered valid by the */ | |
345 /* collector. GC_register_displacement(n) means that if p was returned */ | |
346 /* by GC_malloc, then (char *)p + n will be considered to be a valid */ | |
347 /* pointer to p. N must be small and less than the size of p. */ | |
348 /* (All pointers to the interior of objects from the stack are */ | |
349 /* considered valid in any case. This applies to heap objects and */ | |
350 /* static data.) */ | |
351 /* Preferably, this should be called before any other GC procedures. */ | |
352 /* Calling it later adds to the probability of excess memory */ | |
353 /* retention. */ | |
354 /* This is a no-op if the collector was compiled with recognition of */ | |
355 /* arbitrary interior pointers enabled, which is now the default. */ | |
356 GC_API void GC_register_displacement GC_PROTO((GC_word n)); | |
357 | |
358 /* The following version should be used if any debugging allocation is */ | |
359 /* being done. */ | |
360 GC_API void GC_debug_register_displacement GC_PROTO((GC_word n)); | |
361 | |
362 /* Explicitly trigger a full, world-stop collection. */ | |
363 GC_API void GC_gcollect GC_PROTO((void)); | |
364 | |
365 /* Trigger a full world-stopped collection. Abort the collection if */ | |
366 /* and when stop_func returns a nonzero value. Stop_func will be */ | |
367 /* called frequently, and should be reasonably fast. This works even */ | |
368 /* if virtual dirty bits, and hence incremental collection is not */ | |
369 /* available for this architecture. Collections can be aborted faster */ | |
370 /* than normal pause times for incremental collection. However, */ | |
371 /* aborted collections do no useful work; the next collection needs */ | |
372 /* to start from the beginning. */ | |
373 /* Return 0 if the collection was aborted, 1 if it succeeded. */ | |
374 typedef int (* GC_stop_func) GC_PROTO((void)); | |
375 GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func)); | |
376 | |
377 /* Return the number of bytes in the heap. Excludes collector private */ | |
378 /* data structures. Includes empty blocks and fragmentation loss. */ | |
379 /* Includes some pages that were allocated but never written. */ | |
380 GC_API size_t GC_get_heap_size GC_PROTO((void)); | |
381 | |
382 /* Return a lower bound on the number of free bytes in the heap. */ | |
383 GC_API size_t GC_get_free_bytes GC_PROTO((void)); | |
384 | |
385 /* Return the number of bytes allocated since the last collection. */ | |
386 GC_API size_t GC_get_bytes_since_gc GC_PROTO((void)); | |
387 | |
388 /* Return the total number of bytes allocated in this process. */ | |
389 /* Never decreases, except due to wrapping. */ | |
390 GC_API size_t GC_get_total_bytes GC_PROTO((void)); | |
391 | |
392 /* Disable garbage collection. Even GC_gcollect calls will be */ | |
393 /* ineffective. */ | |
394 GC_API void GC_disable GC_PROTO((void)); | |
395 | |
396 /* Reenable garbage collection. GC_disable() and GC_enable() calls */ | |
397 /* nest. Garbage collection is enabled if the number of calls to */ | |
398 /* both functions is equal. */ | |
399 GC_API void GC_enable GC_PROTO((void)); | |
400 | |
401 /* Enable incremental/generational collection. */ | |
402 /* Not advisable unless dirty bits are */ | |
403 /* available or most heap objects are */ | |
404 /* pointerfree(atomic) or immutable. */ | |
405 /* Don't use in leak finding mode. */ | |
406 /* Ignored if GC_dont_gc is true. */ | |
407 /* Only the generational piece of this is */ | |
408 /* functional if GC_parallel is TRUE */ | |
409 /* or if GC_time_limit is GC_TIME_UNLIMITED. */ | |
410 /* Causes GC_local_gcj_malloc() to revert to */ | |
411 /* locked allocation. Must be called */ | |
412 /* before any GC_local_gcj_malloc() calls. */ | |
413 GC_API void GC_enable_incremental GC_PROTO((void)); | |
414 | |
415 /* Does incremental mode write-protect pages? Returns zero or */ | |
416 /* more of the following, or'ed together: */ | |
417 #define GC_PROTECTS_POINTER_HEAP 1 /* May protect non-atomic objs. */ | |
418 #define GC_PROTECTS_PTRFREE_HEAP 2 | |
419 #define GC_PROTECTS_STATIC_DATA 4 /* Currently never. */ | |
420 #define GC_PROTECTS_STACK 8 /* Probably impractical. */ | |
421 | |
422 #define GC_PROTECTS_NONE 0 | |
423 GC_API int GC_incremental_protection_needs GC_PROTO((void)); | |
424 | |
425 /* Perform some garbage collection work, if appropriate. */ | |
426 /* Return 0 if there is no more work to be done. */ | |
427 /* Typically performs an amount of work corresponding roughly */ | |
428 /* to marking from one page. May do more work if further */ | |
429 /* progress requires it, e.g. if incremental collection is */ | |
430 /* disabled. It is reasonable to call this in a wait loop */ | |
431 /* until it returns 0. */ | |
432 GC_API int GC_collect_a_little GC_PROTO((void)); | |
433 | |
434 /* Allocate an object of size lb bytes. The client guarantees that */ | |
435 /* as long as the object is live, it will be referenced by a pointer */ | |
436 /* that points to somewhere within the first 256 bytes of the object. */ | |
437 /* (This should normally be declared volatile to prevent the compiler */ | |
438 /* from invalidating this assertion.) This routine is only useful */ | |
439 /* if a large array is being allocated. It reduces the chance of */ | |
440 /* accidentally retaining such an array as a result of scanning an */ | |
441 /* integer that happens to be an address inside the array. (Actually, */ | |
442 /* it reduces the chance of the allocator not finding space for such */ | |
443 /* an array, since it will try hard to avoid introducing such a false */ | |
444 /* reference.) On a SunOS 4.X or MS Windows system this is recommended */ | |
445 /* for arrays likely to be larger than 100K or so. For other systems, */ | |
446 /* or if the collector is not configured to recognize all interior */ | |
447 /* pointers, the threshold is normally much higher. */ | |
448 GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb)); | |
449 GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb)); | |
450 | |
451 #if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720 | |
452 # define GC_ADD_CALLER | |
453 # define GC_RETURN_ADDR (GC_word)__return_address | |
454 #endif | |
455 | |
456 #ifdef __linux__ | |
457 # include <features.h> | |
458 # if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2) \ | |
459 && !defined(__ia64__) | |
460 # define GC_HAVE_BUILTIN_BACKTRACE | |
461 # define GC_CAN_SAVE_CALL_STACKS | |
462 # endif | |
463 # if defined(__i386__) || defined(__x86_64__) | |
464 # define GC_CAN_SAVE_CALL_STACKS | |
465 # endif | |
466 #endif | |
467 | |
468 #if defined(__sparc__) | |
469 # define GC_CAN_SAVE_CALL_STACKS | |
470 #endif | |
471 | |
472 /* If we're on a platform on which we can't save call stacks, but */ | |
473 /* gcc is normally used, we go ahead and define GC_ADD_CALLER. */ | |
474 /* We make this decision independent of whether gcc is actually being */ | |
475 /* used, in order to keep the interface consistent, and allow mixing */ | |
476 /* of compilers. */ | |
477 /* This may also be desirable if it is possible but expensive to */ | |
478 /* retrieve the call chain. */ | |
479 #if (defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) \ | |
480 || defined(__FreeBSD__)) & !defined(GC_CAN_SAVE_CALL_STACKS) | |
481 # define GC_ADD_CALLER | |
482 # if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95) | |
483 /* gcc knows how to retrieve return address, but we don't know */ | |
484 /* how to generate call stacks. */ | |
485 # define GC_RETURN_ADDR (GC_word)__builtin_return_address(0) | |
486 # else | |
487 /* Just pass 0 for gcc compatibility. */ | |
488 # define GC_RETURN_ADDR 0 | |
489 # endif | |
490 #endif | |
491 | |
492 #ifdef GC_ADD_CALLER | |
493 # define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__ | |
494 # define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s, int i | |
495 #else | |
496 # define GC_EXTRAS __FILE__, __LINE__ | |
497 # define GC_EXTRA_PARAMS GC_CONST char * s, int i | |
498 #endif | |
499 | |
500 /* Debugging (annotated) allocation. GC_gcollect will check */ | |
501 /* objects allocated in this way for overwrites, etc. */ | |
502 GC_API GC_PTR GC_debug_malloc | |
503 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); | |
504 GC_API GC_PTR GC_debug_malloc_atomic | |
505 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); | |
506 GC_API GC_PTR GC_debug_malloc_uncollectable | |
507 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); | |
508 GC_API GC_PTR GC_debug_malloc_stubborn | |
509 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); | |
510 GC_API GC_PTR GC_debug_malloc_ignore_off_page | |
511 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); | |
512 GC_API GC_PTR GC_debug_malloc_atomic_ignore_off_page | |
513 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); | |
514 GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr)); | |
515 GC_API GC_PTR GC_debug_realloc | |
516 GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes, | |
517 GC_EXTRA_PARAMS)); | |
518 GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR)); | |
519 GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR)); | |
520 | |
521 /* Routines that allocate objects with debug information (like the */ | |
522 /* above), but just fill in dummy file and line number information. */ | |
523 /* Thus they can serve as drop-in malloc/realloc replacements. This */ | |
524 /* can be useful for two reasons: */ | |
525 /* 1) It allows the collector to be built with DBG_HDRS_ALL defined */ | |
526 /* even if some allocation calls come from 3rd party libraries */ | |
527 /* that can't be recompiled. */ | |
528 /* 2) On some platforms, the file and line information is redundant, */ | |
529 /* since it can be reconstructed from a stack trace. On such */ | |
530 /* platforms it may be more convenient not to recompile, e.g. for */ | |
531 /* leak detection. This can be accomplished by instructing the */ | |
532 /* linker to replace malloc/realloc with these. */ | |
533 GC_API GC_PTR GC_debug_malloc_replacement GC_PROTO((size_t size_in_bytes)); | |
534 GC_API GC_PTR GC_debug_realloc_replacement | |
535 GC_PROTO((GC_PTR object_addr, size_t size_in_bytes)); | |
536 | |
537 # ifdef GC_DEBUG | |
538 # define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS) | |
539 # define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS) | |
540 # define GC_MALLOC_UNCOLLECTABLE(sz) \ | |
541 GC_debug_malloc_uncollectable(sz, GC_EXTRAS) | |
542 # define GC_MALLOC_IGNORE_OFF_PAGE(sz) \ | |
543 GC_debug_malloc_ignore_off_page(sz, GC_EXTRAS) | |
544 # define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \ | |
545 GC_debug_malloc_atomic_ignore_off_page(sz, GC_EXTRAS) | |
546 # define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS) | |
547 # define GC_FREE(p) GC_debug_free(p) | |
548 # define GC_REGISTER_FINALIZER(p, f, d, of, od) \ | |
549 GC_debug_register_finalizer(p, f, d, of, od) | |
550 # define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \ | |
551 GC_debug_register_finalizer_ignore_self(p, f, d, of, od) | |
552 # define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \ | |
553 GC_debug_register_finalizer_no_order(p, f, d, of, od) | |
554 # define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS); | |
555 # define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p) | |
556 # define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p) | |
557 # define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \ | |
558 GC_general_register_disappearing_link(link, GC_base(obj)) | |
559 # define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n) | |
560 # else | |
561 # define GC_MALLOC(sz) GC_malloc(sz) | |
562 # define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz) | |
563 # define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz) | |
564 # define GC_MALLOC_IGNORE_OFF_PAGE(sz) \ | |
565 GC_malloc_ignore_off_page(sz) | |
566 # define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \ | |
567 GC_malloc_atomic_ignore_off_page(sz) | |
568 # define GC_REALLOC(old, sz) GC_realloc(old, sz) | |
569 # define GC_FREE(p) GC_free(p) | |
570 # define GC_REGISTER_FINALIZER(p, f, d, of, od) \ | |
571 GC_register_finalizer(p, f, d, of, od) | |
572 # define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \ | |
573 GC_register_finalizer_ignore_self(p, f, d, of, od) | |
574 # define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \ | |
575 GC_register_finalizer_no_order(p, f, d, of, od) | |
576 # define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz) | |
577 # define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p) | |
578 # define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p) | |
579 # define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \ | |
580 GC_general_register_disappearing_link(link, obj) | |
581 # define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n) | |
582 # endif | |
583 /* The following are included because they are often convenient, and */ | |
584 /* reduce the chance for a misspecifed size argument. But calls may */ | |
585 /* expand to something syntactically incorrect if t is a complicated */ | |
586 /* type expression. */ | |
587 # define GC_NEW(t) (t *)GC_MALLOC(sizeof (t)) | |
588 # define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t)) | |
589 # define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t)) | |
590 # define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t)) | |
591 | |
592 /* Finalization. Some of these primitives are grossly unsafe. */ | |
593 /* The idea is to make them both cheap, and sufficient to build */ | |
594 /* a safer layer, closer to PCedar finalization. */ | |
595 /* The interface represents my conclusions from a long discussion */ | |
596 /* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */ | |
597 /* Christian Jacobi, and Russ Atkinson. It's not perfect, and */ | |
598 /* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */ | |
599 typedef void (*GC_finalization_proc) | |
600 GC_PROTO((GC_PTR obj, GC_PTR client_data)); | |
601 | |
GC_API void GC_register_finalizer
   	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
/* Debug variant; pair with objects allocated via the GC_debug_	*/
/* allocators.								*/
GC_API void GC_debug_register_finalizer
   	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
	/* When obj is no longer accessible, invoke		*/
	/* (*fn)(obj, cd).  If a and b are inaccessible, and	*/
	/* a points to b (after disappearing links have been	*/
	/* made to disappear), then only a will be		*/
	/* finalized.  (If this does not create any new		*/
	/* pointers to b, then b will be finalized after the	*/
	/* next collection.)  Any finalizable object that	*/
	/* is reachable from itself by following one or more	*/
	/* pointers will not be finalized (or collected).	*/
	/* Thus cycles involving finalizable objects should	*/
	/* be avoided, or broken by disappearing links.		*/
	/* All but the last finalizer registered for an object  */
	/* is ignored.						*/
	/* Finalization may be removed by passing 0 as fn.	*/
	/* Finalizers are implicitly unregistered just before   */
	/* they are invoked.					*/
	/* The old finalizer and client data are stored in	*/
	/* *ofn and *ocd.					*/
	/* Fn is never invoked on an accessible object,		*/
	/* provided hidden pointers are converted to real 	*/
	/* pointers only if the allocation lock is held, and	*/
	/* such conversions are not performed by finalization	*/
	/* routines.						*/
	/* If GC_register_finalizer is aborted as a result of	*/
	/* a signal, the object may be left with no		*/
	/* finalization, even if neither the old nor new	*/
	/* finalizer were NULL.					*/
	/* Obj should be the non-NULL starting address of an 	*/
	/* object allocated by GC_malloc or friends.		*/
	/* Note that any garbage collectable object referenced	*/
	/* by cd will be considered accessible until the	*/
	/* finalizer is invoked.				*/
640 | |
/* Another version of the above follows.  It ignores		*/
/* self-cycles, i.e. pointers from a finalizable object to	*/
/* itself.  There is a stylistic argument that this is wrong,	*/
/* but it's unavoidable for C++, since the compiler may		*/
/* silently introduce these.  It's also benign in that specific	*/
/* case.  And it helps if finalizable objects are split to	*/
/* avoid cycles.						*/
/* Note that cd will still be viewed as accessible, even if it	*/
/* refers to the object itself.					*/
GC_API void GC_register_finalizer_ignore_self
	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_ignore_self
	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
656 | |
/* Another version of the above.  It ignores all cycles, i.e.	*/
/* it imposes no finalization ordering at all.			*/
/* It should probably only be used by Java implementations.	*/
/* Note that cd will still be viewed as accessible, even if it	*/
/* refers to the object itself.					*/
GC_API void GC_register_finalizer_no_order
	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_no_order
	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
		  GC_finalization_proc *ofn, GC_PTR *ocd));
667 | |
668 | |
/* The following routine may be used to break cycles between	*/
/* finalizable objects, thus causing cyclic finalizable		*/
/* objects to be finalized in the correct order.  Standard	*/
/* use involves calling GC_register_disappearing_link(&p),	*/
/* where p is a pointer that is not followed by finalization	*/
/* code, and should not be considered in determining 		*/
/* finalization order.						*/
GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
	/* Link should point to a field of a heap-allocated 	*/
	/* object obj.  *link will be cleared when obj is	*/
	/* found to be inaccessible.  This happens BEFORE any	*/
	/* finalization code is invoked, and BEFORE any		*/
	/* decisions about finalization order are made.		*/
	/* This is useful in telling the finalizer that 	*/
	/* some pointers are not essential for proper		*/
	/* finalization.  This may avoid finalization cycles.	*/
	/* Note that obj may be resurrected by another		*/
	/* finalizer, and thus the clearing of *link may	*/
	/* be visible to non-finalization code.  		*/
	/* There's an argument that an arbitrary action should	*/
	/* be allowed here, instead of just clearing a pointer.	*/
	/* But this causes problems if that action alters, or 	*/
	/* examines connectivity.				*/
	/* Returns 1 if link was already registered, 0		*/
	/* otherwise.						*/
	/* Only exists for backward compatibility.  See below:	*/
695 | |
GC_API int GC_general_register_disappearing_link
	GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
	/* A slight generalization of the above. *link is	*/
	/* cleared when obj first becomes inaccessible.  This	*/
	/* can be used to implement weak pointers easily and	*/
	/* safely. Typically link will point to a location	*/
	/* holding a disguised pointer to obj.  (A pointer 	*/
	/* inside an "atomic" object is effectively  		*/
	/* disguised.)   In this way soft			*/
	/* pointers are broken before any object		*/
	/* reachable from them is finalized.  Each link		*/
	/* may be registered only once, i.e. with one obj	*/
	/* value.  This was added after a long email discussion */
	/* with John Ellis.					*/
	/* Obj must be a pointer to the first word of an object */
	/* we allocated.  It is unsafe to explicitly deallocate */
	/* the object containing link.  Explicitly deallocating */
	/* obj may or may not cause link to eventually be 	*/
	/* cleared.						*/
GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
	/* Returns 0 if link was not actually registered.	*/
	/* Undoes a registration by either of the above two	*/
	/* routines.						*/
719 | |
/* Returns !=0 if GC_invoke_finalizers has something to do. 		*/
GC_API int GC_should_invoke_finalizers GC_PROTO((void));

GC_API int GC_invoke_finalizers GC_PROTO((void));
	/* Run finalizers for all objects that are ready to	*/
	/* be finalized.  Return the number of finalizers	*/
	/* that were run.  Normally this is also called		*/
	/* implicitly during some allocations.	If		*/
	/* GC_finalize_on_demand is nonzero, it must be called	*/
	/* explicitly.						*/
730 | |
/* GC_set_warn_proc can be used to redirect or filter warning messages.	*/
/* p may not be a NULL pointer.						*/
/* The warning procedure receives a printf-style message string and a	*/
/* single word argument.						*/
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
    /* Returns old warning procedure.	*/

GC_API GC_word GC_set_free_space_divisor GC_PROTO((GC_word value));
    /* Set free_space_divisor.  See above for definition.	*/
    /* Returns old value.					*/
740 | |
/* The following is intended to be used by a higher level	*/
/* (e.g. Java-like) finalization facility.  It is expected	*/
/* that finalization code will arrange for hidden pointers to	*/
/* disappear.  Otherwise objects can be accessed after they	*/
/* have been collected.						*/
/* Note that putting pointers in atomic objects or in 		*/
/* nonpointer slots of "typed" objects is equivalent to 	*/
/* disguising them in this way, and may have other advantages.	*/
# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
    typedef GC_word GC_hidden_pointer;
    /* A pointer is hidden by bitwise complement, so the collector's	*/
    /* conservative scan will not treat the stored word as a pointer.	*/
#   define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
    /* Complement is an involution, so REVEAL is the same operation.	*/
#   define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
    /* Converting a hidden pointer to a real pointer requires verifying	*/
    /* that the object still exists.  This involves acquiring the  	*/
    /* allocator lock to avoid a race with the collector.		*/
# endif /* I_HIDE_POINTERS */

/* NOTE(review): presumably invokes fn(client_data) with the		*/
/* allocation lock held and returns its result — confirm against the	*/
/* implementation.							*/
typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
GC_API GC_PTR GC_call_with_alloc_lock
        	GC_PROTO((GC_fn_type fn, GC_PTR client_data));
761 | |
/* The following routines are primarily intended for use with a 	*/
/* preprocessor which inserts calls to check C pointer arithmetic.	*/

/* Check that p and q point to the same object.  		*/
/* Fail conspicuously if they don't.				*/
/* Returns the first argument.  				*/
/* Succeeds if neither p nor q points to the heap.		*/
/* May succeed if both p and q point between heap objects.	*/
GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));

/* Checked pointer pre- and post- increment operations.  Note that	*/
/* the second argument is in units of bytes, not multiples of the	*/
/* object size.  This should either be invoked from a macro, or the	*/
/* call should be automatically generated.				*/
GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));

/* Check that p is visible						*/
/* to the collector as a possibly pointer containing location.		*/
/* If it isn't fail conspicuously.					*/
/* Returns the argument in all cases.  May erroneously succeed		*/
/* in hard cases.  (This is intended for debugging use with		*/
/* untyped allocations.  The idea is that it should be possible, though	*/
/* slow, to add such a call to all indirect pointer stores.)		*/
/* Currently useless for multithreaded worlds.				*/
GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));

/* Check that if p is a pointer to a heap page, then it points to	*/
/* a valid displacement within a heap object.				*/
/* Fail conspicuously if this property does not hold.			*/
/* Uninteresting with GC_all_interior_pointers.				*/
/* Always returns its argument.						*/
GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
795 | |
/* Safer, but slow, pointer addition.  Probably useful mainly with 	*/
/* a preprocessor.  Useful only for heap pointers.			*/
/* type_of_result must be the (pointer) type of x, so the checked	*/
/* result can be cast back; with gcc, typeof supplies it automatically.	*/
#ifdef GC_DEBUG
#   define GC_PTR_ADD3(x, n, type_of_result) \
	((type_of_result)GC_same_obj((x)+(n), (x)))
    /* BUG FIX: GC_PRE_INCR3 and GC_POST_INCR2 were each missing their	*/
    /* closing parenthesis, making any expansion a syntax error.	*/
#   define GC_PRE_INCR3(x, n, type_of_result) \
	((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
#   define GC_POST_INCR2(x, type_of_result) \
	((type_of_result)GC_post_incr(&(x), sizeof(*x)))
#   ifdef __GNUC__
#       define GC_PTR_ADD(x, n) \
	    GC_PTR_ADD3(x, n, typeof(x))
#       define GC_PRE_INCR(x, n) \
	    GC_PRE_INCR3(x, n, typeof(x))
	/* BUG FIX: this previously expanded to the nonexistent		*/
	/* GC_POST_INCR3; GC_POST_INCR2 is the intended helper.		*/
#       define GC_POST_INCR(x, n) \
	    GC_POST_INCR2(x, typeof(x))
#   else
	/* We can't do this right without typeof, which ANSI		*/
	/* decided was not sufficiently useful.  Repeatedly		*/
	/* mentioning the arguments seems too dangerous to be		*/
	/* useful.  So does not casting the result.			*/
#   	define GC_PTR_ADD(x, n) ((x)+(n))
#   endif
#else	/* !GC_DEBUG */
#   define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
#   define GC_PTR_ADD(x, n) ((x)+(n))
#   define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
#   define GC_PRE_INCR(x, n) ((x) += (n))
    /* BUG FIX: GC_POST_INCR2 now takes the same two arguments as its	*/
    /* GC_DEBUG counterpart above (it previously took three here, so	*/
    /* no call site could compile in both configurations).		*/
#   define GC_POST_INCR2(x, type_of_result) ((x)++)
#   define GC_POST_INCR(x, n) ((x)++)
#endif
827 | |
/* Safer assignment of a pointer to a nonstack location.	*/
/* p points at the location being stored into (e.g. &obj->f);	*/
/* q is the pointer value being stored.				*/
#ifdef GC_DEBUG
#   ifdef __STDC__
#	define GC_PTR_STORE(p, q) \
	    (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
#   else
#	define GC_PTR_STORE(p, q) \
	    (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
#   endif
#else /* !GC_DEBUG */
    /* BUG FIX: this used to be *((p) = (q)), which assigned q to the	*/
    /* pointer variable p itself (and rejected the usual &loc argument	*/
    /* as a non-lvalue) instead of storing q through p, as the		*/
    /* GC_DEBUG definition does.					*/
#   define GC_PTR_STORE(p, q) (*(p) = (q))
#endif
840 | |
/* Functions called to report pointer checking errors.  These are	*/
/* function-pointer variables, so a client may install its own		*/
/* reporting routines.							*/
GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q));

GC_API void (*GC_is_valid_displacement_print_proc)
  	GC_PROTO((GC_PTR p));

GC_API void (*GC_is_visible_print_proc)
 	GC_PROTO((GC_PTR p));
850 | |
/* For pthread support, we generally need to intercept a number of 	*/
/* thread library calls.  We do that here by macro defining them.	*/

#if !defined(GC_USE_LD_WRAP) && \
    (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS))
# include "gc_pthread_redirects.h"
#endif

# if defined(PCR) || defined(GC_SOLARIS_THREADS) || \
     defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
   	/* Any flavor of threads except SRC_M3.	*/
/* This returns a list of objects, linked through their first		*/
/* word.  Its use can greatly reduce lock contention problems, since	*/
/* the allocation lock can be acquired and released many fewer times.	*/
/* lb must be large enough to hold the pointer field.			*/
/* It is used internally by gc_local_alloc.h, which provides a simpler	*/
/* programming interface on Linux.					*/
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element	*/
                                    /* in returned list.		*/
extern void GC_thr_init();	/* Needed for Solaris/X86	*/
				/* NOTE(review): pre-ANSI empty parens	*/
				/* mean "unspecified args", not (void).	*/

#endif /* THREADS && !SRC_M3 */
874 | |
#if defined(GC_WIN32_THREADS)
# include <windows.h>
# include <winbase.h>

/*
 * All threads must be created using GC_CreateThread, so that they will be
 * recorded in the thread table.  For backwards compatibility, this is not
 * technically true if the GC is built as a dynamic library, since it can
 * and does then use DllMain to keep track of thread creations.  But new code
 * should be built to call GC_CreateThread.
 */
/* Same signature as the Win32 CreateThread API; wraps it so the new	*/
/* thread's stack is registered with the collector.			*/
HANDLE WINAPI GC_CreateThread(
    LPSECURITY_ATTRIBUTES lpThreadAttributes, 
    DWORD dwStackSize, LPTHREAD_START_ROUTINE lpStartAddress, 
    LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD lpThreadId );

# if defined(_WIN32_WCE)
  /*
   * win32_threads.c implements the real WinMain, which will start a new thread
   * to call GC_WinMain after initializing the garbage collector.
   */
  int WINAPI GC_WinMain(
      HINSTANCE hInstance,
      HINSTANCE hPrevInstance,
      LPWSTR lpCmdLine,
      int nCmdShow );

  /* Redirect the client's entry points at the GC's wrappers, unless	*/
  /* we are compiling the collector itself.				*/
#  ifndef GC_BUILD
#    define WinMain GC_WinMain
#    define CreateThread GC_CreateThread
#  endif
# endif /* defined(_WIN32_WCE) */

#endif /* defined(GC_WIN32_THREADS) */
909 | |
/*
 * If you are planning on putting
 * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
 * from the statically loaded program section.
 * This circumvents a Solaris 2.X (X<=4) linker bug.
 */
#if defined(sparc) || defined(__sparc)
    /* NOTE(review): "extern end, etext;" relies on implicit int and on	*/
    /* the linker-provided end/etext symbols; presumably GC_noop just	*/
    /* forces references to them from the main program — confirm.	*/
#   define GC_INIT() { extern end, etext; \
		       GC_noop(&end, &etext); }
#else
# if defined(__CYGWIN32__) && defined(GC_USE_DLL) || defined (_AIX)
    /*
     * Similarly gnu-win32 DLLs need explicit initialization from
     * the main program, as does AIX.
     */
#   define GC_INIT() { GC_add_roots(DATASTART, DATAEND); }
# else
#  define GC_INIT()
# endif
#endif
930 | |
#if !defined(_WIN32_WCE) \
  && ((defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
      || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__))
  /* win32S may not free all resources on process exit.  */
  /* This explicitly deallocates the heap.		 */
  /* (Pre-ANSI empty parens: argument list unspecified.) */
    GC_API void GC_win32_free_heap ();
#endif
938 | |
939 #if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) ) | |
940 /* Allocation really goes through GC_amiga_allocwrapper_do */ | |
941 # include "gc_amiga_redirects.h" | |
942 #endif | |
943 | |
944 #if defined(GC_REDIRECT_TO_LOCAL) && !defined(GC_LOCAL_ALLOC_H) | |
945 # include "gc_local_alloc.h" | |
946 #endif | |
947 | |
948 #ifdef __cplusplus | |
949 } /* end of extern "C" */ | |
950 #endif | |
951 | |
952 #endif /* _GC_H */ |