Mercurial > emacs
comparison gc/os_dep.c @ 51488:5de98dce4bd1
*** empty log message ***
author | Dave Love <fx@gnu.org> |
---|---|
date | Thu, 05 Jun 2003 17:49:22 +0000 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
51487:01d68b199093 | 51488:5de98dce4bd1 |
---|---|
1 /* | |
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers | |
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. | |
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. | |
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved. | |
6 * | |
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED | |
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. | |
9 * | |
10 * Permission is hereby granted to use or copy this program | |
11 * for any purpose, provided the above notices are retained on all copies. | |
12 * Permission to modify the code and to distribute modified code is granted, | |
13 * provided the above notices are retained, and a notice that the code was | |
14 * modified is included with the above copyright notice. | |
15 */ | |
16 | |
17 # include "private/gc_priv.h" | |
18 | |
19 # if defined(LINUX) && !defined(POWERPC) | |
20 # include <linux/version.h> | |
21 # if (LINUX_VERSION_CODE <= 0x10400) | |
22 /* Ugly hack to get struct sigcontext_struct definition. Required */ | |
23 /* for some early 1.3.X releases. Will hopefully go away soon. */ | |
24 /* in some later Linux releases, asm/sigcontext.h may have to */ | |
25 /* be included instead. */ | |
26 # define __KERNEL__ | |
27 # include <asm/signal.h> | |
28 # undef __KERNEL__ | |
29 # else | |
30 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */ | |
31 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */ | |
32 /* prototypes, so we have to include the top-level sigcontext.h to */ | |
33 /* make sure the former gets defined to be the latter if appropriate. */ | |
34 # include <features.h> | |
35 # if 2 <= __GLIBC__ | |
36 # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__ | |
37 /* glibc 2.1 no longer has sigcontext.h. But signal.h */ | |
38 /* has the right declaration for glibc 2.1. */ | |
39 # include <sigcontext.h> | |
40 # endif /* 0 == __GLIBC_MINOR__ */ | |
41 # else /* not 2 <= __GLIBC__ */ | |
42 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */ | |
43 /* one. Check LINUX_VERSION_CODE to see which we should reference. */ | |
44 # include <asm/sigcontext.h> | |
45 # endif /* 2 <= __GLIBC__ */ | |
46 # endif | |
47 # endif | |
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \ | |
49 && !defined(MSWINCE) | |
50 # include <sys/types.h> | |
51 # if !defined(MSWIN32) && !defined(SUNOS4) | |
52 # include <unistd.h> | |
53 # endif | |
54 # endif | |
55 | |
56 # include <stdio.h> | |
57 # if defined(MSWINCE) | |
58 # define SIGSEGV 0 /* value is irrelevant */ | |
59 # else | |
60 # include <signal.h> | |
61 # endif | |
62 | |
63 /* Blatantly OS dependent routines, except for those that are related */ | |
64 /* to dynamic loading. */ | |
65 | |
66 # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START) | |
67 # define NEED_FIND_LIMIT | |
68 # endif | |
69 | |
70 # if !defined(STACKBOTTOM) && defined(HEURISTIC2) | |
71 # define NEED_FIND_LIMIT | |
72 # endif | |
73 | |
74 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR) | |
75 # define NEED_FIND_LIMIT | |
76 # endif | |
77 | |
78 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \ | |
79 || (defined(LINUX) && defined(SPARC))) && !defined(PCR) | |
80 # define NEED_FIND_LIMIT | |
81 # endif | |
82 | |
83 #if defined(FREEBSD) && defined(I386) | |
84 # include <machine/trap.h> | |
85 # if !defined(PCR) | |
86 # define NEED_FIND_LIMIT | |
87 # endif | |
88 #endif | |
89 | |
90 #ifdef NEED_FIND_LIMIT | |
91 # include <setjmp.h> | |
92 #endif | |
93 | |
94 #ifdef AMIGA | |
95 # define GC_AMIGA_DEF | |
96 # include "AmigaOS.c" | |
97 # undef GC_AMIGA_DEF | |
98 #endif | |
99 | |
100 #if defined(MSWIN32) || defined(MSWINCE) | |
101 # define WIN32_LEAN_AND_MEAN | |
102 # define NOSERVICE | |
103 # include <windows.h> | |
104 #endif | |
105 | |
106 #ifdef MACOS | |
107 # include <Processes.h> | |
108 #endif | |
109 | |
110 #ifdef IRIX5 | |
111 # include <sys/uio.h> | |
112 # include <malloc.h> /* for locking */ | |
113 #endif | |
114 #ifdef USE_MMAP | |
115 # include <sys/types.h> | |
116 # include <sys/mman.h> | |
117 # include <sys/stat.h> | |
118 #endif | |
119 | |
120 #ifdef UNIX_LIKE | |
121 # include <fcntl.h> | |
122 #endif | |
123 | |
124 #if defined(SUNOS5SIGS) || defined (HURD) || defined(LINUX) | |
125 # ifdef SUNOS5SIGS | |
126 # include <sys/siginfo.h> | |
127 # endif | |
128 # undef setjmp | |
129 # undef longjmp | |
130 # define setjmp(env) sigsetjmp(env, 1) | |
131 # define longjmp(env, val) siglongjmp(env, val) | |
132 # define jmp_buf sigjmp_buf | |
133 #endif | |
134 | |
135 #ifdef DJGPP | |
136 /* Apparently necessary for djgpp 2.01. May cause problems with */ | |
137 /* other versions. */ | |
138 typedef long unsigned int caddr_t; | |
139 #endif | |
140 | |
141 #ifdef PCR | |
142 # include "il/PCR_IL.h" | |
143 # include "th/PCR_ThCtl.h" | |
144 # include "mm/PCR_MM.h" | |
145 #endif | |
146 | |
147 #if !defined(NO_EXECUTE_PERMISSION) | |
148 # define OPT_PROT_EXEC PROT_EXEC | |
149 #else | |
150 # define OPT_PROT_EXEC 0 | |
151 #endif | |
152 | |
153 #if defined(SEARCH_FOR_DATA_START) | |
154 /* The I386 case can be handled without a search. The Alpha case */ | |
155 /* used to be handled differently as well, but the rules changed */ | |
156 /* for recent Linux versions. This seems to be the easiest way to */ | |
157 /* cover all versions. */ | |
158 | |
# ifdef LINUX
    /* Some Linux distributions arrange to define __data_start.  Some   */
    /* define data_start as a weak symbol.  The latter is technically   */
    /* broken, since the user program may define data_start, in which   */
    /* case we lose.  Nonetheless, we try both, prefering __data_start. */
    /* We assume gcc-compatible pragmas.                                */
#   pragma weak __data_start
    extern int __data_start[];
#   pragma weak data_start
    extern int data_start[];
# endif /* LINUX */
  /* _end is provided by the linker and marks the end of the program's  */
  /* BSS segment; it bounds the fallback search below.                  */
  extern int _end[];

  /* Start of the main program's data segment, as computed by           */
  /* GC_init_linux_data_start.                                          */
  ptr_t GC_data_start;

  /* Initialize GC_data_start.  On Linux, first try the (weak) linker   */
  /* symbols __data_start and data_start; if neither resolved to a      */
  /* nonzero address, probe downward from _end with GC_find_limit for   */
  /* the first unmapped page (FALSE selects the downward search).       */
  void GC_init_linux_data_start()
  {
    extern ptr_t GC_find_limit();

#   ifdef LINUX
      /* Try the easy approaches first: */
      if ((ptr_t)__data_start != 0) {
          GC_data_start = (ptr_t)(__data_start);
          return;
      }
      if ((ptr_t)data_start != 0) {
          GC_data_start = (ptr_t)(data_start);
          return;
      }
#   endif /* LINUX */
    GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
  }
191 #endif | |
192 | |
193 # ifdef ECOS | |
194 | |
# ifndef ECOS_GC_MEMORY_SIZE
# define ECOS_GC_MEMORY_SIZE (448 * 1024)
# endif /* ECOS_GC_MEMORY_SIZE */

// setjmp() function, as described in ANSI para 7.6.1.1
#define setjmp( __env__ )  hal_setjmp( __env__ )

// FIXME: This is a simple way of allocating memory which is
// compatible with ECOS early releases.  Later releases use a more
// sophisticated means of allocating memory than this simple static
// allocator, but this method is at least bound to work.
static char memory[ECOS_GC_MEMORY_SIZE];
static char *brk = memory;

// Minimal sbrk() replacement backed by the static 'memory' arena.
// 'increment' may be negative, matching sbrk() semantics.
// Returns the previous break on success; returns NULL and leaves the
// break unchanged if the request would move the break outside the
// arena.
static void *tiny_sbrk(ptrdiff_t increment)
{
  void *p = brk;

  brk += increment;

  // Reject both growth past the end of the arena and (for negative
  // increments) shrinkage below its start.  The original code checked
  // only the upper bound, so a large negative increment could leave
  // 'brk' pointing before 'memory', corrupting all later allocations.
  if (brk > memory + sizeof memory || brk < memory)
    {
      brk -= increment;
      return NULL;
    }

  return p;
}
#define sbrk tiny_sbrk
224 # endif /* ECOS */ | |
225 | |
226 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) | |
/* Start of the data segment, as computed by GC_init_netbsd_elf.        */
ptr_t GC_data_start;

/* Determine the data segment start on ELF NetBSD/OpenBSD by probing    */
/* downward (FALSE) from the address of 'environ', a variable known to  */
/* live in the data segment, to the first unmapped page.                */
void GC_init_netbsd_elf()
{
        extern ptr_t GC_find_limit();
        extern char **environ;
                /* This may need to be environ, without the underscore, for */
                /* some versions.                                           */
    GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
}
237 #endif | |
238 | |
239 # ifdef OS2 | |
240 | |
241 # include <stddef.h> | |
242 | |
243 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */ | |
244 | |
/* Layout of the MSDOS stub header at the start of an OS/2 executable.  */
/* new_exe_offset gives the file offset of the "new" (LX) header.       */
struct exe_hdr {
    unsigned short      magic_number;
    unsigned short      padding[29];
    long                new_exe_offset;
};

#define E_MAGIC(x)      (x).magic_number
#define EMAGIC          0x5A4D          /* "MZ" DOS signature           */
#define E_LFANEW(x)     (x).new_exe_offset

/* Truncated view of the OS/2 LX (32-bit) executable header: only the   */
/* fields needed to validate the file and locate the object table.      */
struct e32_exe {
    unsigned char       magic_number[2];
    unsigned char       byte_order;
    unsigned char       word_order;
    unsigned long       exe_format_level;
    unsigned short      cpu;
    unsigned short      os;
    unsigned long       padding1[13];
    unsigned long       object_table_offset;
    unsigned long       object_count;
    unsigned long       padding2[31];
};

#define E32_MAGIC1(x)   (x).magic_number[0]
#define E32MAGIC1       'L'
#define E32_MAGIC2(x)   (x).magic_number[1]
#define E32MAGIC2       'X'
#define E32_BORDER(x)   (x).byte_order
#define E32LEBO         0               /* little-endian byte order     */
#define E32_WORDER(x)   (x).word_order
#define E32LEWO         0               /* little-endian word order     */
#define E32_CPU(x)      (x).cpu
#define E32CPU286       1
#define E32_OBJTAB(x)   (x).object_table_offset
#define E32_OBJCNT(x)   (x).object_count

/* One entry of the LX object (segment) table; size/base describe the   */
/* segment's extent, flags its permissions.                             */
struct o32_obj {
    unsigned long       size;
    unsigned long       base;
    unsigned long       flags;
    unsigned long       pagemap;
    unsigned long       mapsize;
    unsigned long       reserved;
};

#define O32_FLAGS(x)    (x).flags
#define OBJREAD         0x0001L
#define OBJWRITE        0x0002L
#define OBJINVALID      0x0080L
#define O32_SIZE(x)     (x).size
#define O32_BASE(x)     (x).base
297 # else /* IBM's compiler */ | |
298 | |
299 /* A kludge to get around what appears to be a header file bug */ | |
300 # ifndef WORD | |
301 # define WORD unsigned short | |
302 # endif | |
303 # ifndef DWORD | |
304 # define DWORD unsigned long | |
305 # endif | |
306 | |
307 # define EXE386 1 | |
308 # include <newexe.h> | |
309 # include <exe386.h> | |
310 | |
311 # endif /* __IBMC__ */ | |
312 | |
313 # define INCL_DOSEXCEPTIONS | |
314 # define INCL_DOSPROCESS | |
315 # define INCL_DOSERRORS | |
316 # define INCL_DOSMODULEMGR | |
317 # define INCL_DOSMEMMGR | |
318 # include <os2.h> | |
319 | |
320 | |
321 /* Disable and enable signals during nontrivial allocations */ | |
322 | |
/* Suppress asynchronous interference during nontrivial allocation by   */
/* entering an OS/2 must-complete section.  Aborts if the section       */
/* nesting count is not 1, i.e. on a nested disable, which the          */
/* collector never performs.                                            */
void GC_disable_signals(void)
{
    ULONG nest;
    
    DosEnterMustComplete(&nest);
    if (nest != 1) ABORT("nested GC_disable_signals");
}
330 | |
/* Leave the must-complete section entered by GC_disable_signals.       */
/* Aborts if the nesting count does not return to 0 (unmatched enable). */
void GC_enable_signals(void)
{
    ULONG nest;
    
    DosExitMustComplete(&nest);
    if (nest != 0) ABORT("GC_enable_signals");
}
338 | |
339 | |
340 # else | |
341 | |
342 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \ | |
343 && !defined(MSWINCE) \ | |
344 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \ | |
345 && !defined(NOSYS) && !defined(ECOS) | |
346 | |
    /* Abstraction over the two historical signal-mask APIs: the        */
    /* traditional BSD int-mask interface and the POSIX/SYSV sigset_t   */
    /* interface.  The BSD path is taken only when the sigmask macro    */
    /* exists (and the platform is not UTS4/HURD).                      */
#   if defined(sigmask) && !defined(UTS4) && !defined(HURD)
        /* Use the traditional BSD interface */
#       define SIGSET_T int
#       define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
#       define SIG_FILL(set)  (set) = 0x7fffffff
          /* Setting the leading bit appears to provoke a bug in some   */
          /* longjmp implementations.  Most systems appear not to have  */
          /* a signal 32.                                               */
#       define SIGSETMASK(old, new) (old) = sigsetmask(new)
#   else
        /* Use POSIX/SYSV interface */
#       define SIGSET_T sigset_t
#       define SIG_DEL(set, signal) sigdelset(&(set), (signal))
#       define SIG_FILL(set) sigfillset(&set)
#       define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
#   endif

/* TRUE once new_mask below has been built by GC_disable_signals.       */
static GC_bool mask_initialized = FALSE;

/* Mask installed while signals are disabled (built lazily).            */
static SIGSET_T new_mask;

/* Mask saved by GC_disable_signals, restored by GC_enable_signals.     */
static SIGSET_T old_mask;

/* Scratch destination for the mask discarded on restore.               */
static SIGSET_T dummy;

#if defined(PRINTSTATS) && !defined(THREADS)
  /* Debug-only nesting counter for disable/enable pairing checks.      */
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
#endif
376 | |
/* Block (almost) all signals while the collector is in a critical      */
/* section.  On first use, build a mask that blocks everything except   */
/* the synchronous faults (SIGSEGV, SIGILL, SIGBUS, SIGIOT, SIGEMT)     */
/* the GC itself may need to field, plus SIGQUIT/SIGTRAP for            */
/* debuggability.  The previous mask is saved in old_mask so the        */
/* matching GC_enable_signals can restore it.  Not reentrant; the       */
/* CHECK_SIGNALS build verifies calls are properly paired.              */
void GC_disable_signals()
{
    if (!mask_initialized) {
        SIG_FILL(new_mask);

        SIG_DEL(new_mask, SIGSEGV);
        SIG_DEL(new_mask, SIGILL);
        SIG_DEL(new_mask, SIGQUIT);
#       ifdef SIGBUS
            SIG_DEL(new_mask, SIGBUS);
#       endif
#       ifdef SIGIOT
            SIG_DEL(new_mask, SIGIOT);
#       endif
#       ifdef SIGEMT
            SIG_DEL(new_mask, SIGEMT);
#       endif
#       ifdef SIGTRAP
            SIG_DEL(new_mask, SIGTRAP);
#       endif
        mask_initialized = TRUE;
    }
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 0) ABORT("Nested disables");
        GC_sig_disabled++;
#   endif
    SIGSETMASK(old_mask,new_mask);
}
405 | |
/* Restore the signal mask saved by the matching GC_disable_signals     */
/* call.  The mask displaced here is discarded into 'dummy'.            */
void GC_enable_signals()
{
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
        GC_sig_disabled--;
#   endif
    SIGSETMASK(dummy,old_mask);
}
414 | |
415 # endif /* !PCR */ | |
416 | |
417 # endif /*!OS/2 */ | |
418 | |
/* Ivan Demakov: simplest way (to me) */
#if defined (DOS4GW)
  /* DOS4GW delivers no asynchronous signals the collector needs to     */
  /* guard against, so both operations are intentionally no-ops.        */
  void GC_disable_signals() { }
  void GC_enable_signals() { }
#endif
424 | |
/* Find the page size */
/* System page size, set once by GC_setpagesize.                        */
word GC_page_size;

# if defined(MSWIN32) || defined(MSWINCE)
  /* Cache the page size (and the rest of GC_sysinfo, consulted         */
  /* elsewhere for address-space limits) from GetSystemInfo.            */
  void GC_setpagesize()
  {
    GetSystemInfo(&GC_sysinfo);
    GC_page_size = GC_sysinfo.dwPageSize;
  }

# else
#   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
       || defined(USE_MUNMAP)
        /* The true page size matters for mprotect/mmap-based           */
        /* strategies, so ask the OS.                                   */
        void GC_setpagesize()
        {
            GC_page_size = GETPAGESIZE();
        }
#   else
        /* It's acceptable to fake it.  No page-granular machinery is   */
        /* in use, so the heap block size serves as a stand-in.         */
        void GC_setpagesize()
        {
            GC_page_size = HBLKSIZE;
        }
#   endif
# endif
450 | |
/*
 * Find the base of the stack.
 * Used only in single-threaded environment.
 * With threads, GC_mark_roots needs to know how to do this.
 * Called with allocator lock held.
 */
# if defined(MSWIN32) || defined(MSWINCE)
  /* Any protection value that permits ordinary writes.                 */
# define is_writable(prot) ((prot) == PAGE_READWRITE \
                            || (prot) == PAGE_WRITECOPY \
                            || (prot) == PAGE_EXECUTE_READWRITE \
                            || (prot) == PAGE_EXECUTE_WRITECOPY)
/* Return the number of bytes that are writable starting at p.          */
/* The pointer p is assumed to be page aligned.                         */
/* If base is not 0, *base becomes the beginning of the                 */
/* allocation region containing p.                                      */
word GC_get_writable_length(ptr_t p, ptr_t *base)
{
    MEMORY_BASIC_INFORMATION buf;
    word result;
    word protect;
    
    result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
    if (base != 0) *base = (ptr_t)(buf.AllocationBase);
    /* Strip the PAGE_GUARD/PAGE_NOCACHE modifier bits before           */
    /* classifying the protection value.                                */
    protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
    if (!is_writable(protect)) {
        return(0);
    }
    /* Reserved-but-uncommitted regions don't count as writable.        */
    if (buf.State != MEM_COMMIT) return(0);
    return(buf.RegionSize);
}

/* Stack base = end of the writable region containing the current       */
/* stack pointer: round the SP down to a page boundary and add the      */
/* writable length VirtualQuery reports from there.                     */
ptr_t GC_get_stack_base()
{
    int dummy;
    ptr_t sp = (ptr_t)(&dummy);
    ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
    word size = GC_get_writable_length(trunc_sp, 0);
   
    return(trunc_sp + size);
}


# endif /* MS Windows */
495 | |
# ifdef BEOS
# include <kernel/OS.h>
    /* BeOS reports each thread's stack bounds directly through         */
    /* get_thread_info; stack_end is the high end (the stack base).     */
    ptr_t GC_get_stack_base(){
        thread_info th;
        get_thread_info(find_thread(NULL),&th);
        return th.stack_end;
    }
# endif /* BEOS */
504 | |
505 | |
# ifdef OS2

/* On OS/2 the thread information block records the stack limit         */
/* directly (tib_pstacklimit).  DosGetInfoBlocks is not expected to     */
/* fail; if it does we abort rather than guess.                         */
ptr_t GC_get_stack_base()
{
    PTIB ptib;
    PPIB ppib;
    
    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    return((ptr_t)(ptib -> tib_pstacklimit));
}

# endif /* OS2 */
521 | |
522 # ifdef AMIGA | |
523 # define GC_AMIGA_SB | |
524 # include "AmigaOS.c" | |
525 # undef GC_AMIGA_SB | |
526 # endif /* AMIGA */ | |
527 | |
528 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE) | |
529 | |
    /* Signal-handler function pointer type; prototyped form only       */
    /* under ANSI compilers.                                            */
#   ifdef __STDC__
        typedef void (*handler)(int);
#   else
        typedef void (*handler)();
#   endif

    /* Previous SIGSEGV (and, where applicable, SIGBUS) dispositions,   */
    /* saved by GC_set_and_save_fault_handler and restored by           */
    /* GC_reset_fault_handler.  sigaction-based platforms save the      */
    /* whole struct; others save the bare handler pointer.              */
#   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) || defined(HURD)
        static struct sigaction old_segv_act;
#       if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) || defined(HURD)
            static struct sigaction old_bus_act;
#       endif
#   else
        static handler old_segv_handler, old_bus_handler;
#   endif
544 | |
    /* Install h as the handler for SIGSEGV (and, on platforms where    */
    /* address probes may raise it, SIGBUS), saving the previous        */
    /* dispositions for later restoration by GC_reset_fault_handler.    */
#   ifdef __STDC__
      void GC_set_and_save_fault_handler(handler h)
#   else
      void GC_set_and_save_fault_handler(h)
      handler h;
#   endif
    {
#       if defined(SUNOS5SIGS) || defined(IRIX5)  \
           || defined(OSF1) || defined(HURD)
          struct sigaction      act;

          act.sa_handler        = h;
#         ifdef SUNOS5SIGS
            act.sa_flags          = SA_RESTART | SA_NODEFER;
#         else
            act.sa_flags          = SA_RESTART;
#         endif
          /* The presence of SA_NODEFER represents yet another gross    */
          /* hack.  Under Solaris 2.3, siglongjmp doesn't appear to     */
          /* interact correctly with -lthread.  We hide the confusion   */
          /* by making sure that signal handling doesn't affect the     */
          /* signal mask.                                               */

          (void) sigemptyset(&act.sa_mask);
#         ifdef GC_IRIX_THREADS
                /* Older versions have a bug related to retrieving and  */
                /* and setting a handler at the same time.              */
                (void) sigaction(SIGSEGV, 0, &old_segv_act);
                (void) sigaction(SIGSEGV, &act, 0);
#         else
                (void) sigaction(SIGSEGV, &act, &old_segv_act);
#               if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
                   || defined(HPUX) || defined(HURD)
                    /* Under Irix 5.x or HP/UX, we may get SIGBUS.      */
                    /* Pthreads doesn't exist under Irix 5.x, so we     */
                    /* don't have to worry in the threads case.         */
                    (void) sigaction(SIGBUS, &act, &old_bus_act);
#               endif
#         endif /* GC_IRIX_THREADS */
#       else
          /* Pre-POSIX fallback: signal() returns the old disposition.  */
          old_segv_handler = signal(SIGSEGV, h);
#         ifdef SIGBUS
            old_bus_handler = signal(SIGBUS, h);
#         endif
#       endif
    }
591 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */ | |
592 | |
593 # ifdef NEED_FIND_LIMIT | |
/* Some tools to implement HEURISTIC2 */
#   define MIN_PAGE_SIZE 256    /* Smallest conceivable page size, bytes */
    /* Jump buffer used to escape from a fault taken while probing      */
    /* addresses in GC_find_limit.                                      */
    /* static */ jmp_buf GC_jmp_buf;
    
/*ARGSUSED*/
    /* Temporary fault handler: any SIGSEGV/SIGBUS raised during an     */
    /* address probe simply unwinds back to the setjmp in the caller.   */
    void GC_fault_handler(sig)
    int sig;
    {
        longjmp(GC_jmp_buf, 1);
    }

    /* Route faults to GC_fault_handler, saving the old dispositions    */
    /* so GC_reset_fault_handler can restore them afterwards.           */
    void GC_setup_temporary_fault_handler()
    {
        GC_set_and_save_fault_handler(GC_fault_handler);
    }
609 | |
    /* Restore the SIGSEGV/SIGBUS dispositions saved earlier by         */
    /* GC_set_and_save_fault_handler; the platform cases mirror the     */
    /* ones used when installing the probe handler.                     */
    void GC_reset_fault_handler()
    {
#       if defined(SUNOS5SIGS) || defined(IRIX5) \
           || defined(OSF1) || defined(HURD)
          (void) sigaction(SIGSEGV, &old_segv_act, 0);
#         if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
             || defined(HPUX) || defined(HURD)
              (void) sigaction(SIGBUS, &old_bus_act, 0);
#         endif
#       else
          (void) signal(SIGSEGV, old_segv_handler);
#         ifdef SIGBUS
            (void) signal(SIGBUS, old_bus_handler);
#         endif
#       endif
    }
626 | |
    /* Return the first nonaddressible location > p (up) or             */
    /* the smallest location q s.t. [q,p) is addressable (!up).         */
    /* We assume that p (up) or p-1 (!up) is addressable.               */
    /* Works by touching successive MIN_PAGE_SIZE-spaced addresses      */
    /* until one faults; the fault longjmps back out of the loop.       */
    /* Must be called with the allocation lock held.                    */
    ptr_t GC_find_limit(p, up)
    ptr_t p;
    GC_bool up;
    {
        static VOLATILE ptr_t result;
                /* Needs to be static, since otherwise it may not be    */
                /* preserved across the longjmp.  Can safely be         */
                /* static since it's only called once, with the         */
                /* allocation lock held.                                */


        GC_setup_temporary_fault_handler();
        /* setjmp returns 0 on the initial call and nonzero when the    */
        /* fault handler longjmps back after an inaccessible probe.     */
        if (setjmp(GC_jmp_buf) == 0) {
            result = (ptr_t)(((word)(p))
                              & ~(MIN_PAGE_SIZE-1));
            for (;;) {
                if (up) {
                    result += MIN_PAGE_SIZE;
                } else {
                    result -= MIN_PAGE_SIZE;
                }
                /* Touch the candidate page; a fault exits the loop.    */
                GC_noop1((word)(*result));
            }
        }
        GC_reset_fault_handler();
        if (!up) {
            /* The fault happened one step below the limit; undo it.    */
            result += MIN_PAGE_SIZE;
        }
        return(result);
    }
660 # endif | |
661 | |
#if defined(ECOS) || defined(NOSYS)
  /* These embedded targets configure a fixed stack bottom at build     */
  /* time, so just return the STACKBOTTOM constant.                     */
  ptr_t GC_get_stack_base()
  {
    return STACKBOTTOM;
  }
#endif
668 | |
669 #ifdef LINUX_STACKBOTTOM | |
670 | |
671 #include <sys/types.h> | |
672 #include <sys/stat.h> | |
673 #include <ctype.h> | |
674 | |
# define STAT_SKIP 27   /* Number of fields preceding startstack        */
                        /* field in /proc/self/stat                     */

/* __libc_stack_end is exported by glibc 2.2+ and holds the initial     */
/* stack end; declared weak so binaries still link (and the address     */
/* tests 0) against libcs that don't provide it.                        */
# pragma weak __libc_stack_end
extern ptr_t __libc_stack_end;
680 | |
# ifdef IA64
    /* Weak glibc export of the IA64 register backing store base;       */
    /* weak so older libcs still link (address tests 0 then).           */
#   pragma weak __libc_ia64_register_backing_store_base
    extern ptr_t __libc_ia64_register_backing_store_base;

    /* Return the base of the IA64 register backing store: prefer the   */
    /* value published by glibc, otherwise estimate it by displacing    */
    /* from GC_stackbottom and rounding up to the store alignment.      */
    ptr_t GC_get_register_stack_base(void)
    {
      if (0 != &__libc_ia64_register_backing_store_base
          && 0 != __libc_ia64_register_backing_store_base) {
        /* Glibc 2.2.4 has a bug such that for dynamically linked       */
        /* executables __libc_ia64_register_backing_store_base is       */
        /* defined but uninitialized during constructor calls.          */
        /* Hence we check for both nonzero address and value.           */
        return __libc_ia64_register_backing_store_base;
      } else {
        word result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
        result += BACKING_STORE_ALIGNMENT - 1;
        result &= ~(BACKING_STORE_ALIGNMENT - 1);
        return (ptr_t)result;
      }
    }
# endif
702 | |
703 ptr_t GC_linux_stack_base(void) | |
704 { | |
705 /* We read the stack base value from /proc/self/stat. We do this */ | |
706 /* using direct I/O system calls in order to avoid calling malloc */ | |
707 /* in case REDIRECT_MALLOC is defined. */ | |
708 # define STAT_BUF_SIZE 4096 | |
709 # if defined(GC_USE_LD_WRAP) | |
710 # define STAT_READ __real_read | |
711 # else | |
712 # define STAT_READ read | |
713 # endif | |
714 char stat_buf[STAT_BUF_SIZE]; | |
715 int f; | |
716 char c; | |
717 word result = 0; | |
718 size_t i, buf_offset = 0; | |
719 | |
720 /* First try the easy way. This should work for glibc 2.2 */ | |
721 if (0 != &__libc_stack_end) { | |
722 # ifdef IA64 | |
723 /* Some versions of glibc set the address 16 bytes too */ | |
724 /* low while the initialization code is running. */ | |
725 if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) { | |
726 return __libc_stack_end + 0x10; | |
727 } /* Otherwise it's not safe to add 16 bytes and we fall */ | |
728 /* back to using /proc. */ | |
729 # else | |
730 return __libc_stack_end; | |
731 # endif | |
732 } | |
733 f = open("/proc/self/stat", O_RDONLY); | |
734 if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) { | |
735 ABORT("Couldn't read /proc/self/stat"); | |
736 } | |
737 c = stat_buf[buf_offset++]; | |
738 /* Skip the required number of fields. This number is hopefully */ | |
739 /* constant across all Linux implementations. */ | |
740 for (i = 0; i < STAT_SKIP; ++i) { | |
741 while (isspace(c)) c = stat_buf[buf_offset++]; | |
742 while (!isspace(c)) c = stat_buf[buf_offset++]; | |
743 } | |
744 while (isspace(c)) c = stat_buf[buf_offset++]; | |
745 while (isdigit(c)) { | |
746 result *= 10; | |
747 result += c - '0'; | |
748 c = stat_buf[buf_offset++]; | |
749 } | |
750 close(f); | |
751 if (result < 0x10000000) ABORT("Absurd stack bottom value"); | |
752 return (ptr_t)result; | |
753 } | |
754 | |
755 #endif /* LINUX_STACKBOTTOM */ | |
756 | |
#ifdef FREEBSD_STACKBOTTOM

/* This uses an undocumented sysctl call, but at least one expert       */
/* believes it will stay.                                               */

#include <unistd.h>
#include <sys/types.h>
#include <sys/sysctl.h>

/* Query the kernel for the top of the user stack via the               */
/* kern.usrstack sysctl; aborts if the call fails.                      */
ptr_t GC_freebsd_stack_base(void)
{
    int nm[2] = {CTL_KERN, KERN_USRSTACK};
    ptr_t base;
    size_t len = sizeof(ptr_t);
    int r = sysctl(nm, 2, &base, &len, NULL, 0);
    
    if (r) ABORT("Error getting stack base");

    return base;
}

#endif /* FREEBSD_STACKBOTTOM */
779 | |
#if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
    && !defined(MSWINCE) && !defined(OS2)

/* Generic stack-base finder for Unix-like targets.  The port's         */
/* gcconfig.h selects the strategy: STACKBOTTOM (fixed constant),       */
/* HEURISTIC1 (round the current SP to a STACK_GRAN boundary),          */
/* LINUX_STACKBOTTOM / FREEBSD_STACKBOTTOM (ask the OS), or             */
/* HEURISTIC2 (probe for the limit with GC_find_limit).                 */
ptr_t GC_get_stack_base()
{
    word dummy;         /* Its address approximates the current SP.     */
    ptr_t result;

#   define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)

#   ifdef STACKBOTTOM
        return(STACKBOTTOM);
#   else
#       ifdef HEURISTIC1
#          ifdef STACK_GROWS_DOWN
             /* Round the SP up to the next STACK_GRAN boundary.        */
             result = (ptr_t)((((word)(&dummy))
                               + STACKBOTTOM_ALIGNMENT_M1)
                              & ~STACKBOTTOM_ALIGNMENT_M1);
#          else
             /* Round the SP down to a STACK_GRAN boundary.             */
             result = (ptr_t)(((word)(&dummy))
                              & ~STACKBOTTOM_ALIGNMENT_M1);
#          endif
#       endif /* HEURISTIC1 */
#       ifdef LINUX_STACKBOTTOM
           result = GC_linux_stack_base();
#       endif
#       ifdef FREEBSD_STACKBOTTOM
           result = GC_freebsd_stack_base();
#       endif
#       ifdef HEURISTIC2
#           ifdef STACK_GROWS_DOWN
                result = GC_find_limit((ptr_t)(&dummy), TRUE);
#               ifdef HEURISTIC2_LIMIT
                    /* Clamp an implausibly large probe result, but     */
                    /* only if the SP itself is below the limit.        */
                    if (result > HEURISTIC2_LIMIT
                        && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
                            result = HEURISTIC2_LIMIT;
                    }
#               endif
#           else
                result = GC_find_limit((ptr_t)(&dummy), FALSE);
#               ifdef HEURISTIC2_LIMIT
                    if (result < HEURISTIC2_LIMIT
                        && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
                            result = HEURISTIC2_LIMIT;
                    }
#               endif
#           endif

#       endif /* HEURISTIC2 */
#       ifdef STACK_GROWS_DOWN
            /* If every heuristic came up empty, pretend the stack      */
            /* bottom is at the very top of the address space.          */
            if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
#       endif
        return(result);
#   endif /* STACKBOTTOM */
}

# endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS */
837 | |
838 /* | |
839 * Register static data segment(s) as roots. | |
840 * If more data segments are added later then they need to be registered | |
841 * at that point (as we do with SunOS dynamic loading), | |
842 * or GC_mark_roots needs to check for them (as we do with PCR). | |
843 * Called with allocator lock held. | |
844 */ | |
845 | |
846 # ifdef OS2 | |
847 | |
848 void GC_register_data_segments() | |
849 { | |
850 PTIB ptib; | |
851 PPIB ppib; | |
852 HMODULE module_handle; | |
853 # define PBUFSIZ 512 | |
854 UCHAR path[PBUFSIZ]; | |
855 FILE * myexefile; | |
856 struct exe_hdr hdrdos; /* MSDOS header. */ | |
857 struct e32_exe hdr386; /* Real header for my executable */ | |
858 struct o32_obj seg; /* Currrent segment */ | |
859 int nsegs; | |
860 | |
861 | |
862 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) { | |
863 GC_err_printf0("DosGetInfoBlocks failed\n"); | |
864 ABORT("DosGetInfoBlocks failed\n"); | |
865 } | |
866 module_handle = ppib -> pib_hmte; | |
867 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) { | |
868 GC_err_printf0("DosQueryModuleName failed\n"); | |
869 ABORT("DosGetInfoBlocks failed\n"); | |
870 } | |
871 myexefile = fopen(path, "rb"); | |
872 if (myexefile == 0) { | |
873 GC_err_puts("Couldn't open executable "); | |
874 GC_err_puts(path); GC_err_puts("\n"); | |
875 ABORT("Failed to open executable\n"); | |
876 } | |
877 if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) { | |
878 GC_err_puts("Couldn't read MSDOS header from "); | |
879 GC_err_puts(path); GC_err_puts("\n"); | |
880 ABORT("Couldn't read MSDOS header"); | |
881 } | |
882 if (E_MAGIC(hdrdos) != EMAGIC) { | |
883 GC_err_puts("Executable has wrong DOS magic number: "); | |
884 GC_err_puts(path); GC_err_puts("\n"); | |
885 ABORT("Bad DOS magic number"); | |
886 } | |
887 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) { | |
888 GC_err_puts("Seek to new header failed in "); | |
889 GC_err_puts(path); GC_err_puts("\n"); | |
890 ABORT("Bad DOS magic number"); | |
891 } | |
892 if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) { | |
893 GC_err_puts("Couldn't read MSDOS header from "); | |
894 GC_err_puts(path); GC_err_puts("\n"); | |
895 ABORT("Couldn't read OS/2 header"); | |
896 } | |
897 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) { | |
898 GC_err_puts("Executable has wrong OS/2 magic number:"); | |
899 GC_err_puts(path); GC_err_puts("\n"); | |
900 ABORT("Bad OS/2 magic number"); | |
901 } | |
902 if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) { | |
903 GC_err_puts("Executable %s has wrong byte order: "); | |
904 GC_err_puts(path); GC_err_puts("\n"); | |
905 ABORT("Bad byte order"); | |
906 } | |
907 if ( E32_CPU(hdr386) == E32CPU286) { | |
908 GC_err_puts("GC can't handle 80286 executables: "); | |
909 GC_err_puts(path); GC_err_puts("\n"); | |
910 EXIT(); | |
911 } | |
912 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386), | |
913 SEEK_SET) != 0) { | |
914 GC_err_puts("Seek to object table failed: "); | |
915 GC_err_puts(path); GC_err_puts("\n"); | |
916 ABORT("Seek to object table failed"); | |
917 } | |
918 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) { | |
919 int flags; | |
920 if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) { | |
921 GC_err_puts("Couldn't read obj table entry from "); | |
922 GC_err_puts(path); GC_err_puts("\n"); | |
923 ABORT("Couldn't read obj table entry"); | |
924 } | |
925 flags = O32_FLAGS(seg); | |
926 if (!(flags & OBJWRITE)) continue; | |
927 if (!(flags & OBJREAD)) continue; | |
928 if (flags & OBJINVALID) { | |
929 GC_err_printf0("Object with invalid pages?\n"); | |
930 continue; | |
931 } | |
932 GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE); | |
933 } | |
934 } | |
935 | |
936 # else /* !OS2 */ | |
937 | |
938 # if defined(MSWIN32) || defined(MSWINCE) | |
939 | |
940 # ifdef MSWIN32 | |
941 /* Unfortunately, we have to handle win32s very differently from NT, */ | |
942 /* Since VirtualQuery has very different semantics. In particular, */ | |
943 /* under win32s a VirtualQuery call on an unmapped page returns an */ | |
944 /* invalid result. Under NT, GC_register_data_segments is a noop and */ | |
945 /* all real work is done by GC_register_dynamic_libraries. Under */ | |
946 /* win32s, we cannot find the data segments associated with dll's. */ | |
947 /* We register the main data segment here. */ | |
948 # ifdef __GCC__ | |
949 GC_bool GC_no_win32_dlls = TRUE; | |
950 /* GCC can't do SEH, so we can't use VirtualQuery */ | |
951 # else | |
952 GC_bool GC_no_win32_dlls = FALSE; | |
953 # endif | |
954 | |
955 void GC_init_win32() | |
956 { | |
957 /* if we're running under win32s, assume that no DLLs will be loaded */ | |
958 DWORD v = GetVersion(); | |
959 GC_no_win32_dlls |= ((v & 0x80000000) && (v & 0xff) <= 3); | |
960 } | |
961 | |
962 /* Return the smallest address a such that VirtualQuery */ | |
963 /* returns correct results for all addresses between a and start. */ | |
964 /* Assumes VirtualQuery returns correct information for start. */ | |
/* Walk backwards from start, one region at a time, as long as		*/
/* VirtualQuery keeps returning valid allocated regions.  Returns the	*/
/* lowest page-aligned address reached; used to find the beginning of	*/
/* the root section containing start.					*/
ptr_t GC_least_described_address(ptr_t start)
{  
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    LPVOID limit;		/* lowest address the app may use	*/
    ptr_t p;			/* current candidate answer		*/
    LPVOID q;			/* page just below p			*/
    
    limit = GC_sysinfo.lpMinimumApplicationAddress;
    /* Round start down to a page boundary.				*/
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    for (;;) {
    	q = (LPVOID)(p - GC_page_size);
	/* Stop if stepping down wrapped around zero or left the	*/
	/* application address range.					*/
    	if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
    	result = VirtualQuery(q, &buf, sizeof(buf));
	/* Stop at an unqueryable or unallocated region.		*/
    	if (result != sizeof(buf) || buf.AllocationBase == 0) break;
	/* Jump to the base of the allocation containing q.		*/
    	p = (ptr_t)(buf.AllocationBase);
    }
    return(p);
}
984 # endif | |
985 | |
986 # ifndef REDIRECT_MALLOC | |
987 /* We maintain a linked list of AllocationBase values that we know */ | |
988 /* correspond to malloc heap sections. Currently this is only called */ | |
989 /* during a GC. But there is some hope that for long running */ | |
990 /* programs we will eventually see most heap sections. */ | |
991 | |
992 /* In the long run, it would be more reliable to occasionally walk */ | |
993 /* the malloc heap with HeapWalk on the default heap. But that */ | |
994 /* apparently works only for NT-based Windows. */ | |
995 | |
996 /* In the long run, a better data structure would also be nice ... */ | |
/* Node in the list of AllocationBase values known to belong to	*/
/* system malloc heap sections.					*/
struct GC_malloc_heap_list {
  void * allocation_base;		/* as reported by VirtualQuery	*/
  struct GC_malloc_heap_list *next;
} *GC_malloc_heap_l = 0;		/* head of the list		*/
1001 | |
1002 /* Is p the base of one of the malloc heap sections we already know */ | |
1003 /* about? */ | |
1004 GC_bool GC_is_malloc_heap_base(ptr_t p) | |
1005 { | |
1006 struct GC_malloc_heap_list *q = GC_malloc_heap_l; | |
1007 | |
1008 while (0 != q) { | |
1009 if (q -> allocation_base == p) return TRUE; | |
1010 q = q -> next; | |
1011 } | |
1012 return FALSE; | |
1013 } | |
1014 | |
1015 void *GC_get_allocation_base(void *p) | |
1016 { | |
1017 MEMORY_BASIC_INFORMATION buf; | |
1018 DWORD result = VirtualQuery(p, &buf, sizeof(buf)); | |
1019 if (result != sizeof(buf)) { | |
1020 ABORT("Weird VirtualQuery result"); | |
1021 } | |
1022 return buf.AllocationBase; | |
1023 } | |
1024 | |
1025 size_t GC_max_root_size = 100000; /* Appr. largest root size. */ | |
1026 | |
1027 void GC_add_current_malloc_heap() | |
1028 { | |
1029 struct GC_malloc_heap_list *new_l = | |
1030 malloc(sizeof(struct GC_malloc_heap_list)); | |
1031 void * candidate = GC_get_allocation_base(new_l); | |
1032 | |
1033 if (new_l == 0) return; | |
1034 if (GC_is_malloc_heap_base(candidate)) { | |
1035 /* Try a little harder to find malloc heap. */ | |
1036 size_t req_size = 10000; | |
1037 do { | |
1038 void *p = malloc(req_size); | |
1039 if (0 == p) { free(new_l); return; } | |
1040 candidate = GC_get_allocation_base(p); | |
1041 free(p); | |
1042 req_size *= 2; | |
1043 } while (GC_is_malloc_heap_base(candidate) | |
1044 && req_size < GC_max_root_size/10 && req_size < 500000); | |
1045 if (GC_is_malloc_heap_base(candidate)) { | |
1046 free(new_l); return; | |
1047 } | |
1048 } | |
1049 # ifdef CONDPRINT | |
1050 if (GC_print_stats) | |
1051 GC_printf1("Found new system malloc AllocationBase at 0x%lx\n", | |
1052 candidate); | |
1053 # endif | |
1054 new_l -> allocation_base = candidate; | |
1055 new_l -> next = GC_malloc_heap_l; | |
1056 GC_malloc_heap_l = new_l; | |
1057 } | |
1058 # endif /* REDIRECT_MALLOC */ | |
1059 | |
1060 /* Is p the start of either the malloc heap, or of one of our */ | |
1061 /* heap sections? */ | |
/* Is p the start of either the malloc heap, or of one of our heap	*/
/* sections?  Also opportunistically refreshes the known-malloc-heap	*/
/* list once per collection.						*/
GC_bool GC_is_heap_base (ptr_t p)
{
   
   unsigned i;
   
#  ifndef REDIRECT_MALLOC
     /* Collection number at which we last probed the malloc heap;	*/
     /* -1 forces a probe on the first call.				*/
     static word last_gc_no = -1;
   
     if (last_gc_no != GC_gc_no) {
       GC_add_current_malloc_heap();
       last_gc_no = GC_gc_no;
     }
     /* Track the largest root size seen, used to bound the probing	*/
     /* in GC_add_current_malloc_heap.					*/
     if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
     if (GC_is_malloc_heap_base(p)) return TRUE;
#  endif
   /* Check our own heap section bases.					*/
   for (i = 0; i < GC_n_heap_bases; i++) {
       if (GC_heap_bases[i] == p) return TRUE;
   }
   return FALSE ;
}
1082 | |
1083 # ifdef MSWIN32 | |
  /* Register, as roots, all committed writable regions reachable by	*/
  /* walking forward from the section containing static_root.		*/
  /* Adjacent qualifying regions are coalesced into single root		*/
  /* intervals before being handed to GC_add_roots_inner.		*/
  /* Only used in the win32s case (GC_no_win32_dlls).			*/
  void GC_register_root_section(ptr_t static_root)
  {
      MEMORY_BASIC_INFORMATION buf;
      DWORD result;
      DWORD protect;
      LPVOID p;
      char * base;
      char * limit, * new_limit;
    
      if (!GC_no_win32_dlls) return;
      p = base = limit = GC_least_described_address(static_root);
      while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
	/* Stop at an unqueryable/unallocated region, or once we hit	*/
	/* one of our own heap sections.				*/
        if (result != sizeof(buf) || buf.AllocationBase == 0
            || GC_is_heap_base(buf.AllocationBase)) break;
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && is_writable(protect)) {
            if ((char *)p == limit) {
		/* Contiguous with the current interval: extend it.	*/
                limit = new_limit;
            } else {
		/* Gap found: flush the current interval, start anew.	*/
                if (base != limit) GC_add_roots_inner(base, limit, FALSE);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
      }
      /* Flush the final pending interval, if any.			*/
      if (base != limit) GC_add_roots_inner(base, limit, FALSE);
  }
1116 #endif | |
1117 | |
  /* Register static data roots.  On MSWIN32 this registers the	*/
  /* section containing a static variable of ours; on NT this is a	*/
  /* no-op and GC_register_dynamic_libraries does the real work.	*/
  /* On MSWINCE this is entirely a no-op.				*/
  void GC_register_data_segments()
  {
#     ifdef MSWIN32
	/* Any static object works as an anchor into the main data	*/
	/* segment.							*/
	static char dummy;
	GC_register_root_section((ptr_t)(&dummy));
#     endif
  }
1125 | |
1126 # else /* !OS2 && !Windows */ | |
1127 | |
1128 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \ | |
1129 || (defined(LINUX) && defined(SPARC))) && !defined(PCR) | |
/* Determine the start of the writable data segment on SVR4-like	*/
/* systems.  Plan A: probe the address at etext's page offset on the	*/
/* page after etext.  Plan B (on fault): search forward from DATAEND.	*/
/* Relies on GC_setup_temporary_fault_handler/GC_jmp_buf to recover	*/
/* from the probe faulting.						*/
ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
    		    & ~(sizeof(word) - 1);
    	/* etext rounded to word boundary	*/
    word next_page = ((text_end + (word)max_page_size - 1)
    		      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    VOLATILE char * result = (char *)(next_page + page_offset);
    /* Note that this isn't equivalent to just adding		*/
    /* max_page_size to &etext if &etext is at a page boundary	*/
    
    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
	/* Try writing to the address.  The self-assignment is a	*/
	/* deliberate read+write probe; VOLATILE keeps the compiler	*/
	/* from optimizing it away.					*/
	*result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
	/* We got here via a longjmp.  The address is not readable.	*/
	/* This is known to happen under Solaris 2.4 + gcc, which place	*/
	/* string constants in the text segment, but after etext.	*/
	/* Use plan B.  Note that we now know there is a gap between	*/
	/* text and data segments, so plan A bought us something.	*/
	result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return((ptr_t)result);
}
1160 # endif | |
1161 | |
1162 # if defined(FREEBSD) && defined(I386) && !defined(PCR) | |
1163 /* Its unclear whether this should be identical to the above, or */ | |
1164 /* whether it should apply to non-X86 architectures. */ | |
1165 /* For now we don't assume that there is always an empty page after */ | |
1166 /* etext. But in some cases there actually seems to be slightly more. */ | |
1167 /* This also deals with holes between read-only data and writable data. */ | |
/* FreeBSD/x86 variant: starting at word-rounded etext, read-probe	*/
/* each page boundary up to DATAEND; on the first fault fall back to	*/
/* GC_find_limit from DATAEND (plan B).  Unlike the SysV version this	*/
/* probes with reads, and does not assume an empty page after etext.	*/
ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
		     & ~(sizeof(word) - 1);
	/* etext rounded to word boundary	*/
    VOLATILE word next_page = (text_end + (word)max_page_size - 1)
			      & ~((word)max_page_size - 1);
    VOLATILE ptr_t result = (ptr_t)text_end;
    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
	/* Try reading at the address.				*/
	/* This should happen before there is another thread.	*/
	for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
	    *(VOLATILE char *)next_page;
	GC_reset_fault_handler();
    } else {
	GC_reset_fault_handler();
	/* As above, we go to plan B	*/
	result = GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return(result);
}
1192 | |
1193 # endif | |
1194 | |
1195 | |
1196 #ifdef AMIGA | |
1197 | |
1198 # define GC_AMIGA_DS | |
1199 # include "AmigaOS.c" | |
1200 # undef GC_AMIGA_DS | |
1201 | |
1202 #else /* !OS2 && !Windows && !AMIGA */ | |
1203 | |
/* Generic (non-OS2, non-Windows, non-Amiga) registration of static	*/
/* data roots.  Selects the platform-appropriate DATASTART..DATAEND	*/
/* interval(s); PCR/SRC_M3/MacOS get special handling.			*/
void GC_register_data_segments()
{
#   if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
	/* As of Solaris 2.3, the Solaris threads implementation	*/
	/* allocates the data structure for the initial thread with	*/
	/* sbrk at process startup.  It needs to be scanned, so that	*/
	/* we don't lose some malloc allocated data structures		*/
	/* hanging from it.  We're on thin ice here ...			*/
        extern caddr_t sbrk();

	GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
#     else
	GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
#       if defined(DATASTART2)
	  /* Some platforms have a second, discontiguous data segment.	*/
	  GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
#       endif
#     endif
#   endif
#   if defined(MACOS)
    {
#   if defined(THINK_C)
	extern void* GC_MacGetDataStart(void);
	/* globals begin above stack and end at a5. */
	GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
			   (ptr_t)LMGetCurrentA5(), FALSE);
#   else
#     if defined(__MWERKS__)
#       if !__POWERPC__
	  extern void* GC_MacGetDataStart(void);
	  /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
#         if __option(far_data)
	  extern void* GC_MacGetDataEnd(void);
#         endif
	  /* globals begin above stack and end at a5. */
	  GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
			     (ptr_t)LMGetCurrentA5(), FALSE);
	  /* MATTHEW: Handle Far Globals */
#         if __option(far_data)
	  /* Far globals follow the QD globals: */
	  GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
			     (ptr_t)GC_MacGetDataEnd(), FALSE);
#         endif
#       else
	  extern char __data_start__[], __data_end__[];
	  GC_add_roots_inner((ptr_t)&__data_start__,
			     (ptr_t)&__data_end__, FALSE);
#       endif /* __POWERPC__ */
#     endif /* __MWERKS__ */
#   endif /* !THINK_C */
    }
#   endif /* MACOS */

    /* Dynamic libraries are added at every collection, since they may  */
    /* change.								*/
}
1260 | |
1261 # endif /* ! AMIGA */ | |
1262 # endif /* ! MSWIN32 && ! MSWINCE*/ | |
1263 # endif /* ! OS2 */ | |
1264 | |
1265 /* | |
1266 * Auxiliary routines for obtaining memory from OS. | |
1267 */ | |
1268 | |
1269 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \ | |
1270 && !defined(MSWIN32) && !defined(MSWINCE) \ | |
1271 && !defined(MACOS) && !defined(DOS4GW) | |
1272 | |
1273 # ifdef SUNOS4 | |
1274 extern caddr_t sbrk(); | |
1275 # endif | |
1276 # ifdef __STDC__ | |
1277 # define SBRK_ARG_T ptrdiff_t | |
1278 # else | |
1279 # define SBRK_ARG_T int | |
1280 # endif | |
1281 | |
1282 | |
1283 # ifdef RS6000 | |
1284 /* The compiler seems to generate speculative reads one past the end of */ | |
1285 /* an allocated object. Hence we need to make sure that the page */ | |
1286 /* following the last heap page is also mapped. */ | |
/* Obtain bytes of memory via sbrk, keeping one extra mapped page	*/
/* past the heap because the RS6000 compiler generates speculative	*/
/* reads one past the end of objects.  If the break is where we left	*/
/* it, reuse last call's extra page; otherwise allocate a fresh one.	*/
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    caddr_t cur_brk = (caddr_t)sbrk(0);
    caddr_t result;
    /* Misalignment of the current break w.r.t. the page size.	*/
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
    /* Break value we expect if nobody else called sbrk since our	*/
    /* last allocation (i.e. our spare page is still on top).		*/
    static caddr_t my_brk_val = 0;
    
    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
    if (lsbs != 0) {
	/* Advance the break to a page boundary first.			*/
        if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
    }
    if (cur_brk == my_brk_val) {
	/* Use the extra block we allocated last time. */
        result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
	/* Hand out memory starting at the previously-spare page.	*/
        result -= GC_page_size;
    } else {
	/* Someone else moved the break: allocate bytes plus a new	*/
	/* trailing guard page.						*/
        result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
    }
    my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
    return((ptr_t)result);
}
1311 | |
1312 #else /* Not RS6000 */ | |
1313 | |
1314 #if defined(USE_MMAP) | |
1315 /* Tested only under Linux, IRIX5 and Solaris 2 */ | |
1316 | |
1317 #ifdef USE_MMAP_FIXED | |
1318 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE | |
1319 /* Seems to yield better performance on Solaris 2, but can */ | |
1320 /* be unreliable if something is already mapped at the address. */ | |
1321 #else | |
1322 # define GC_MMAP_FLAGS MAP_PRIVATE | |
1323 #endif | |
1324 | |
1325 #ifndef HEAP_START | |
1326 # define HEAP_START 0 | |
1327 #endif | |
1328 | |
1329 ptr_t GC_unix_get_mem(bytes) | |
1330 word bytes; | |
1331 { | |
1332 void *result; | |
1333 static ptr_t last_addr = HEAP_START; | |
1334 | |
1335 # ifndef USE_MMAP_ANON | |
1336 static GC_bool initialized = FALSE; | |
1337 static int fd; | |
1338 | |
1339 if (!initialized) { | |
1340 fd = open("/dev/zero", O_RDONLY); | |
1341 fcntl(fd, F_SETFD, FD_CLOEXEC); | |
1342 initialized = TRUE; | |
1343 } | |
1344 # endif | |
1345 | |
1346 if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg"); | |
1347 # ifdef USE_MMAP_ANON | |
1348 result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC, | |
1349 GC_MMAP_FLAGS | MAP_ANON, -1, 0/* offset */); | |
1350 # else | |
1351 result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC, | |
1352 GC_MMAP_FLAGS, fd, 0/* offset */); | |
1353 # endif | |
1354 if (result == MAP_FAILED) return(0); | |
1355 last_addr = (ptr_t)result + bytes + GC_page_size - 1; | |
1356 last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1)); | |
1357 # if !defined(LINUX) | |
1358 if (last_addr == 0) { | |
1359 /* Oops. We got the end of the address space. This isn't */ | |
1360 /* usable by arbitrary C code, since one-past-end pointers */ | |
1361 /* don't work, so we discard it and try again. */ | |
1362 munmap(result, (size_t)(-GC_page_size) - (size_t)result); | |
1363 /* Leave last page mapped, so we can't repeat. */ | |
1364 return GC_unix_get_mem(bytes); | |
1365 } | |
1366 # else | |
1367 GC_ASSERT(last_addr != 0); | |
1368 # endif | |
1369 return((ptr_t)result); | |
1370 } | |
1371 | |
1372 #else /* Not RS6000, not USE_MMAP */ | |
1373 ptr_t GC_unix_get_mem(bytes) | |
1374 word bytes; | |
1375 { | |
1376 ptr_t result; | |
1377 # ifdef IRIX5 | |
1378 /* Bare sbrk isn't thread safe. Play by malloc rules. */ | |
1379 /* The equivalent may be needed on other systems as well. */ | |
1380 __LOCK_MALLOC(); | |
1381 # endif | |
1382 { | |
1383 ptr_t cur_brk = (ptr_t)sbrk(0); | |
1384 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1); | |
1385 | |
1386 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */ | |
1387 if (lsbs != 0) { | |
1388 if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0); | |
1389 } | |
1390 result = (ptr_t)sbrk((SBRK_ARG_T)bytes); | |
1391 if (result == (ptr_t)(-1)) result = 0; | |
1392 } | |
1393 # ifdef IRIX5 | |
1394 __UNLOCK_MALLOC(); | |
1395 # endif | |
1396 return(result); | |
1397 } | |
1398 | |
1399 #endif /* Not USE_MMAP */ | |
1400 #endif /* Not RS6000 */ | |
1401 | |
1402 # endif /* UN*X */ | |
1403 | |
1404 # ifdef OS2 | |
1405 | |
/* Allocate bytes of committed, executable memory via DosAllocMem.	*/
/* Returns 0 on failure.  If the OS hands back address 0 (which the	*/
/* collector cannot use), retry recursively until a nonzero address	*/
/* is obtained.								*/
void * os2_alloc(size_t bytes)
{
    void * result;

    if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
    				    PAG_WRITE | PAG_COMMIT)
		    != NO_ERROR) {
	return(0);
    }
    /* Address 0 is indistinguishable from failure to our callers;	*/
    /* allocate again (the zero block stays allocated, preventing a	*/
    /* repeat).								*/
    if (result == 0) return(os2_alloc(bytes));
    return(result);
}
1418 | |
1419 # endif /* OS2 */ | |
1420 | |
1421 | |
1422 # if defined(MSWIN32) || defined(MSWINCE) | |
1423 SYSTEM_INFO GC_sysinfo; | |
1424 # endif | |
1425 | |
1426 # ifdef MSWIN32 | |
1427 | |
1428 # ifdef USE_GLOBAL_ALLOC | |
1429 # define GLOBAL_ALLOC_TEST 1 | |
1430 # else | |
1431 # define GLOBAL_ALLOC_TEST GC_no_win32_dlls | |
1432 # endif | |
1433 | |
1434 word GC_n_heap_bases = 0; | |
1435 | |
1436 ptr_t GC_win32_get_mem(bytes) | |
1437 word bytes; | |
1438 { | |
1439 ptr_t result; | |
1440 | |
1441 if (GLOBAL_ALLOC_TEST) { | |
1442 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */ | |
1443 /* There are also unconfirmed rumors of other */ | |
1444 /* problems, so we dodge the issue. */ | |
1445 result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE); | |
1446 result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1)); | |
1447 } else { | |
1448 /* VirtualProtect only works on regions returned by a */ | |
1449 /* single VirtualAlloc call. Thus we allocate one */ | |
1450 /* extra page, which will prevent merging of blocks */ | |
1451 /* in separate regions, and eliminate any temptation */ | |
1452 /* to call VirtualProtect on a range spanning regions. */ | |
1453 /* This wastes a small amount of memory, and risks */ | |
1454 /* increased fragmentation. But better alternatives */ | |
1455 /* would require effort. */ | |
1456 result = (ptr_t) VirtualAlloc(NULL, bytes + 1, | |
1457 MEM_COMMIT | MEM_RESERVE, | |
1458 PAGE_EXECUTE_READWRITE); | |
1459 } | |
1460 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result"); | |
1461 /* If I read the documentation correctly, this can */ | |
1462 /* only happen if HBLKSIZE > 64k or not a power of 2. */ | |
1463 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections"); | |
1464 GC_heap_bases[GC_n_heap_bases++] = result; | |
1465 return(result); | |
1466 } | |
1467 | |
1468 void GC_win32_free_heap () | |
1469 { | |
1470 if (GC_no_win32_dlls) { | |
1471 while (GC_n_heap_bases > 0) { | |
1472 GlobalFree (GC_heap_bases[--GC_n_heap_bases]); | |
1473 GC_heap_bases[GC_n_heap_bases] = 0; | |
1474 } | |
1475 } | |
1476 } | |
1477 # endif | |
1478 | |
1479 #ifdef AMIGA | |
1480 # define GC_AMIGA_AM | |
1481 # include "AmigaOS.c" | |
1482 # undef GC_AMIGA_AM | |
1483 #endif | |
1484 | |
1485 | |
1486 # ifdef MSWINCE | |
1487 word GC_n_heap_bases = 0; | |
1488 | |
ptr_t GC_wince_get_mem(bytes)
word bytes;
{
    ptr_t result;
    word i;

    /* Round up allocation size to multiple of page size */
    bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);

    /* Try to find reserved, uncommitted pages:  a section whose	*/
    /* remaining uncommitted tail (length to the next allocation	*/
    /* granularity boundary) can hold bytes.				*/
    for (i = 0; i < GC_n_heap_bases; i++) {
	if (((word)(-(signed_word)GC_heap_lengths[i])
	     & (GC_sysinfo.dwAllocationGranularity-1))
	    >= bytes) {
	    result = GC_heap_bases[i] + GC_heap_lengths[i];
	    break;
	}
    }

    if (i == GC_n_heap_bases) {
	/* Reserve more pages */
	word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
			 & ~(GC_sysinfo.dwAllocationGranularity-1);
	/* If we ever support MPROTECT_VDB here, we will probably need to    */
	/* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
	/* never spans regions.  It seems to be OK for a VirtualFree	     */
	/* argument to span regions, so we should be OK for now.	     */
	result = (ptr_t) VirtualAlloc(NULL, res_bytes,
				      MEM_RESERVE | MEM_TOP_DOWN,
				      PAGE_EXECUTE_READWRITE);
	/* NOTE(review): a NULL result from the reserve above is not	*/
	/* checked before being recorded below — confirm callers can	*/
	/* tolerate this, or add an explicit failure return.		*/
	if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
	    /* If I read the documentation correctly, this can	*/
	    /* only happen if HBLKSIZE > 64k or not a power of 2.	*/
	if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
	GC_heap_bases[GC_n_heap_bases] = result;
	GC_heap_lengths[GC_n_heap_bases] = 0;
	GC_n_heap_bases++;
    }

    /* Commit pages */
    result = (ptr_t) VirtualAlloc(result, bytes,
				  MEM_COMMIT,
				  PAGE_EXECUTE_READWRITE);
    if (result != NULL) {
	if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
	GC_heap_lengths[i] += bytes;
    }

    return(result);
}
1539 # endif | |
1540 | |
1541 #ifdef USE_MUNMAP | |
1542 | |
1543 /* For now, this only works on Win32/WinCE and some Unix-like */ | |
1544 /* systems. If you have something else, don't define */ | |
1545 /* USE_MUNMAP. */ | |
1546 /* We assume ANSI C to support this feature. */ | |
1547 | |
1548 #if !defined(MSWIN32) && !defined(MSWINCE) | |
1549 | |
1550 #include <unistd.h> | |
1551 #include <sys/mman.h> | |
1552 #include <sys/stat.h> | |
1553 #include <sys/types.h> | |
1554 | |
1555 #endif | |
1556 | |
1557 /* Compute a page aligned starting address for the unmap */ | |
1558 /* operation on a block of size bytes starting at start. */ | |
1559 /* Return 0 if the block is too small to make this feasible. */ | |
1560 ptr_t GC_unmap_start(ptr_t start, word bytes) | |
1561 { | |
1562 ptr_t result = start; | |
1563 /* Round start to next page boundary. */ | |
1564 result += GC_page_size - 1; | |
1565 result = (ptr_t)((word)result & ~(GC_page_size - 1)); | |
1566 if (result + GC_page_size > start + bytes) return 0; | |
1567 return result; | |
1568 } | |
1569 | |
1570 /* Compute end address for an unmap operation on the indicated */ | |
1571 /* block. */ | |
1572 ptr_t GC_unmap_end(ptr_t start, word bytes) | |
1573 { | |
1574 ptr_t end_addr = start + bytes; | |
1575 end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1)); | |
1576 return end_addr; | |
1577 } | |
1578 | |
1579 /* Under Win32/WinCE we commit (map) and decommit (unmap) */ | |
1580 /* memory using VirtualAlloc and VirtualFree. These functions */ | |
1581 /* work on individual allocations of virtual memory, made */ | |
1582 /* previously using VirtualAlloc with the MEM_RESERVE flag. */ | |
1583 /* The ranges we need to (de)commit may span several of these */ | |
1584 /* allocations; therefore we use VirtualQuery to check */ | |
1585 /* allocation lengths, and split up the range as necessary. */ | |
1586 | |
1587 /* We assume that GC_remap is called on exactly the same range */ | |
1588 /* as a previous call to GC_unmap. It is safe to consistently */ | |
1589 /* round the endpoints in both places. */ | |
/* Unmap (decommit) the page-aligned interior of [start, start+bytes).	*/
/* No-op if the block does not contain a whole page.  On Windows the	*/
/* range may span several VirtualAlloc regions, so it is decommitted	*/
/* region by region; elsewhere a single munmap suffices.		*/
void GC_unmap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;
    if (0 == start_addr) return;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
	  GC_word free_len;
	  if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
	      != sizeof(mem_info))
	      ABORT("Weird VirtualQuery result");
	  /* Decommit at most to the end of the current region.	*/
	  free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
	  if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
	      ABORT("VirtualFree failed");
	  GC_unmapped_bytes += free_len;
	  start_addr += free_len;
	  len -= free_len;
      }
#   else
      if (munmap(start_addr, len) != 0) ABORT("munmap failed");
      GC_unmapped_bytes += len;
#   endif
}
1615 | |
1616 | |
1617 void GC_remap(ptr_t start, word bytes) | |
1618 { | |
1619 static int zero_descr = -1; | |
1620 ptr_t start_addr = GC_unmap_start(start, bytes); | |
1621 ptr_t end_addr = GC_unmap_end(start, bytes); | |
1622 word len = end_addr - start_addr; | |
1623 ptr_t result; | |
1624 | |
1625 # if defined(MSWIN32) || defined(MSWINCE) | |
1626 if (0 == start_addr) return; | |
1627 while (len != 0) { | |
1628 MEMORY_BASIC_INFORMATION mem_info; | |
1629 GC_word alloc_len; | |
1630 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info)) | |
1631 != sizeof(mem_info)) | |
1632 ABORT("Weird VirtualQuery result"); | |
1633 alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize; | |
1634 result = VirtualAlloc(start_addr, alloc_len, | |
1635 MEM_COMMIT, | |
1636 PAGE_EXECUTE_READWRITE); | |
1637 if (result != start_addr) { | |
1638 ABORT("VirtualAlloc remapping failed"); | |
1639 } | |
1640 GC_unmapped_bytes -= alloc_len; | |
1641 start_addr += alloc_len; | |
1642 len -= alloc_len; | |
1643 } | |
1644 # else | |
1645 if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR); | |
1646 fcntl(zero_descr, F_SETFD, FD_CLOEXEC); | |
1647 if (0 == start_addr) return; | |
1648 result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC, | |
1649 MAP_FIXED | MAP_PRIVATE, zero_descr, 0); | |
1650 if (result != start_addr) { | |
1651 ABORT("mmap remapping failed"); | |
1652 } | |
1653 GC_unmapped_bytes -= len; | |
1654 # endif | |
1655 } | |
1656 | |
1657 /* Two adjacent blocks have already been unmapped and are about to */ | |
1658 /* be merged. Unmap the whole block. This typically requires */ | |
1659 /* that we unmap a small section in the middle that was not previously */ | |
1660 /* unmapped due to alignment constraints. */ | |
/* Two adjacent blocks have already been unmapped and are about to	*/
/* be merged.  Unmap the whole block.  This typically requires		*/
/* that we unmap a small section in the middle that was not previously	*/
/* unmapped due to alignment constraints.				*/
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t end2_addr = GC_unmap_end(start2, bytes2);
    /* Normally the gap is [end of block1's unmapped part,		*/
    /* start of block2's unmapped part).				*/
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    word len;
    GC_ASSERT(start1 + bytes1 == start2);
    /* If either block was too small to have an unmapped part,		*/
    /* recompute the boundary over the merged block instead.		*/
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
	  GC_word free_len;
	  if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
	      != sizeof(mem_info))
	      ABORT("Weird VirtualQuery result");
	  /* Decommit at most to the end of the current region.	*/
	  free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
	  if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
	      ABORT("VirtualFree failed");
	  GC_unmapped_bytes += free_len;
	  start_addr += free_len;
	  len -= free_len;
      }
#   else
      if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
      GC_unmapped_bytes += len;
#   endif
}
1694 | |
1695 #endif /* USE_MUNMAP */ | |
1696 | |
1697 /* Routine for pushing any additional roots. In THREADS */ | |
1698 /* environment, this is also responsible for marking from */ | |
1699 /* thread stacks. */ | |
1700 #ifndef THREADS | |
1701 void (*GC_push_other_roots)() = 0; | |
1702 #else /* THREADS */ | |
1703 | |
1704 # ifdef PCR | |
/* PCR: push the stack of thread t as GC roots.  Returns the result	*/
/* of PCR_ThCtl_GetInfo.  NOTE(review): the stack bounds are pushed	*/
/* even if GetInfo failed; the zero-initialization makes that a push	*/
/* of an empty range — confirm that is the intent.			*/
PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
{
    struct PCR_ThCtl_TInfoRep info;
    PCR_ERes result;
    
    info.ti_stkLow = info.ti_stkHi = 0;
    result = PCR_ThCtl_GetInfo(t, &info);
    GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
    return(result);
}
1715 | |
1716 /* Push the contents of an old object. We treat this as stack */ | |
/* data only because that makes it robust against mark stack	*/
1718 /* overflow. */ | |
1719 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data) | |
1720 { | |
1721 GC_push_all_stack((ptr_t)p, (ptr_t)p + size); | |
1722 return(PCR_ERes_okay); | |
1723 } | |
1724 | |
1725 | |
/* PCR: push roots owned by previous memory managers and all thread	*/
/* stacks.  Aborts if either enumeration fails.				*/
void GC_default_push_other_roots GC_PROTO((void))
{
    /* Traverse data allocated by previous memory managers.		*/
	{
	  extern struct PCR_MM_ProcsRep * GC_old_allocator;
	  
	  if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
	  					   GC_push_old_obj, 0)
	      != PCR_ERes_okay) {
	      ABORT("Old object enumeration failed");
	  }
	}
    /* Traverse all thread stacks.  The current thread is not covered	*/
    /* by ApplyToAllOtherThreads, so it is pushed separately.		*/
	if (PCR_ERes_IsErr(
	        PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
	    || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
	    ABORT("Thread stack marking failed\n");
	}
}
1745 | |
1746 # endif /* PCR */ | |
1747 | |
1748 # ifdef SRC_M3 | |
1749 | |
1750 # ifdef ALL_INTERIOR_POINTERS | |
1751 --> misconfigured | |
1752 # endif | |
1753 | |
/* SRC_M3: no collector-owned thread structures to push; the M3	*/
/* runtime owns them.							*/
void GC_push_thread_structures GC_PROTO((void))
{
    /* Not our responsibility. */
}
1758 | |
1759 extern void ThreadF__ProcessStacks(); | |
1760 | |
/* SRC_M3: push one thread stack given by the M3 runtime.  The stop	*/
/* bound is extended by one word — NOTE(review): presumably because	*/
/* the M3 runtime passes an inclusive end address; confirm.		*/
void GC_push_thread_stack(start, stop)
word start, stop;
{
   GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
}
1766 | |
1767 /* Push routine with M3 specific calling convention. */ | |
/* Push routine with M3 specific calling convention.  Only the second	*/
/* argument (the root location p) is used; the others exist to match	*/
/* the callback signature expected by RTMain__GlobalMapProc.		*/
/* (Implicit int return; the value is never meaningful.)		*/
GC_m3_push_root(dummy1, p, dummy2, dummy3)
word *p;
ptr_t dummy1, dummy2;
int dummy3;
{
    word q = *p;
    
    GC_PUSH_ONE_STACK(q, p);
}
1777 | |
1778 /* M3 set equivalent to RTHeap.TracedRefTypes */ | |
1779 typedef struct { int elts[1]; } RefTypeSet; | |
1780 RefTypeSet GC_TracedRefTypes = {{0x1}}; | |
1781 | |
void GC_default_push_other_roots GC_PROTO((void))
{
    /* Use the M3 provided routine for finding static roots.	*/
    /* This is a bit dubious, since it presumes no C roots.	*/
    /* We handle the collector roots explicitly in GC_push_roots	*/
      	RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
	/* Only process thread stacks once something has been		*/
	/* allocated, to avoid startup ordering problems.		*/
	if (GC_words_allocd > 0) {
	    ThreadF__ProcessStacks(GC_push_thread_stack);
	}
	/* Otherwise this isn't absolutely necessary, and we have	*/
	/* startup ordering problems.					*/
}
1794 | |
1795 # endif /* SRC_M3 */ | |
1796 | |
1797 # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \ | |
1798 defined(GC_WIN32_THREADS) | |
1799 | |
1800 extern void GC_push_all_stacks(); | |
1801 | |
/* Thread packages (Solaris/pthreads/Win32): the only extra roots	*/
/* are the thread stacks.						*/
void GC_default_push_other_roots GC_PROTO((void))
{
    GC_push_all_stacks();
}
1806 | |
1807 # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */ | |
1808 | |
1809 void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots; | |
1810 | |
1811 #endif /* THREADS */ | |
1812 | |
1813 /* | |
1814 * Routines for accessing dirty bits on virtual pages. | |
1815 * We plan to eventually implement four strategies for doing so: | |
1816 * DEFAULT_VDB: A simple dummy implementation that treats every page | |
1817 * as possibly dirty. This makes incremental collection | |
1818 * useless, but the implementation is still correct. | |
1819 * PCR_VDB: Use PPCRs virtual dirty bit facility. | |
1820 * PROC_VDB: Use the /proc facility for reading dirty bits. Only | |
1821 * works under some SVR4 variants. Even then, it may be | |
1822 * too slow to be entirely satisfactory. Requires reading | |
1823 * dirty bits for entire address space. Implementations tend | |
1824 * to assume that the client is a (slow) debugger. | |
1825 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of | |
1826 * dirtied pages. The implementation (and implementability) | |
1827 * is highly system dependent. This usually fails when system | |
1828 * calls write to a protected page. We prevent the read system | |
1829 * call from doing so. It is the clients responsibility to | |
1830 * make sure that other system calls are similarly protected | |
1831 * or write only to the stack. | |
1832 */ | |
1833 | |
1834 GC_bool GC_dirty_maintained = FALSE; | |
1835 | |
1836 # ifdef DEFAULT_VDB | |
1837 | |
1838 /* All of the following assume the allocation lock is held, and */ | |
1839 /* signals are disabled. */ | |
1840 | |
1841 /* The client asserts that unallocated pages in the heap are never */ | |
1842 /* written. */ | |
1843 | |
/* Initialize virtual dirty bit implementation.			*/
/* DEFAULT_VDB keeps no state: we merely record that dirty-bit	*/
/* maintenance is (nominally) in effect.			*/
void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
}
1849 | |
/* Retrieve system dirty bits for heap to a local buffer.	*/
/* Restore the system's notion of which pages are dirty.	*/
/* A no-op here: DEFAULT_VDB reports every page as dirty, so	*/
/* there is nothing to read and nothing to reset.		*/
void GC_read_dirty()
{}
1854 | |
1855 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */ | |
1856 /* If the actual page size is different, this returns TRUE if any */ | |
1857 /* of the pages overlapping h are dirty. This routine may err on the */ | |
1858 /* side of labelling pages as dirty (and this implementation does). */ | |
1859 /*ARGSUSED*/ | |
1860 GC_bool GC_page_was_dirty(h) | |
1861 struct hblk *h; | |
1862 { | |
1863 return(TRUE); | |
1864 } | |
1865 | |
1866 /* | |
1867 * The following two routines are typically less crucial. They matter | |
1868 * most with large dynamic libraries, or if we can't accurately identify | |
1869 * stacks, e.g. under Solaris 2.X. Otherwise the following default | |
1870 * versions are adequate. | |
1871 */ | |
1872 | |
1873 /* Could any valid GC heap pointer ever have been written to this page? */ | |
1874 /*ARGSUSED*/ | |
1875 GC_bool GC_page_was_ever_dirty(h) | |
1876 struct hblk *h; | |
1877 { | |
1878 return(TRUE); | |
1879 } | |
1880 | |
/* Reset the n pages starting at h to "was never dirty" status.	*/
/* DEFAULT_VDB keeps no per-page state, so this is a no-op.	*/
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
}
1887 | |
/* A call that:						*/
/* I) hints that [h, h+nblocks) is about to be written.	*/
/* II) guarantees that protection is removed.		*/
/* (I) may speed up some dirty bit implementations.	*/
/* (II) may be essential if we need to ensure that	*/
/* pointer-free system call buffers in the heap are 	*/
/* not protected.					*/
/* DEFAULT_VDB never protects anything, so there is	*/
/* nothing to remove here.				*/
/*ARGSUSED*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
}
1902 | |
1903 # endif /* DEFAULT_VDB */ | |
1904 | |
1905 | |
1906 # ifdef MPROTECT_VDB | |
1907 | |
1908 /* | |
1909 * See DEFAULT_VDB for interface descriptions. | |
1910 */ | |
1911 | |
1912 /* | |
1913 * This implementation maintains dirty bits itself by catching write | |
1914 * faults and keeping track of them. We assume nobody else catches | |
1915 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls. | |
1916 * This means that clients must ensure that system calls don't write | |
1917 * to the write-protected heap. Probably the best way to do this is to | |
1918 * ensure that system calls write at most to POINTERFREE objects in the | |
1919 * heap, and do even that only if we are on a platform on which those | |
1920 * are not protected. Another alternative is to wrap system calls | |
1921 * (see example for read below), but the current implementation holds | |
1922 * a lock across blocking calls, making it problematic for multithreaded | |
1923 * applications. | |
1924 * We assume the page size is a multiple of HBLKSIZE. | |
1925 * We prefer them to be the same. We avoid protecting POINTERFREE | |
1926 * objects only if they are the same. | |
1927 */ | |
1928 | |
# if !defined(MSWIN32) && !defined(MSWINCE)

#   include <sys/mman.h>
#   include <signal.h>
#   include <sys/syscall.h>

    /* Write-protect len bytes starting at addr (read, and optionally	*/
    /* exec, access remain).  We abort on failure: losing protection	*/
    /* silently would corrupt the dirty-bit state.			*/
#   define PROTECT(addr, len) \
	  if (mprotect((caddr_t)(addr), (size_t)(len), \
	      	       PROT_READ | OPT_PROT_EXEC) < 0) { \
	    ABORT("mprotect failed"); \
	  }
    /* Restore full read/write access to len bytes starting at addr.	*/
#   define UNPROTECT(addr, len) \
	  if (mprotect((caddr_t)(addr), (size_t)(len), \
	      	       PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
	    ABORT("un-mprotect failed"); \
	  }

# else

#   ifndef MSWINCE
#     include <signal.h>
#   endif

    /* Win32 equivalents of the above, implemented via VirtualProtect.	*/
    /* protect_junk only absorbs the mandatory old-protection result.	*/
    static DWORD protect_junk;
#   define PROTECT(addr, len) \
	  if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
	  		      &protect_junk)) { \
	    DWORD last_error = GetLastError(); \
	    GC_printf1("Last error code: %lx\n", last_error); \
	    ABORT("VirtualProtect failed"); \
	  }
#   define UNPROTECT(addr, len) \
	  if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
	  		      &protect_junk)) { \
	    ABORT("un-VirtualProtect failed"); \
	  }

# endif
1967 | |
/* SIG_PF is the type under which we save a previously installed	*/
/* fault handler, so that we can chain to it; its exact signature is	*/
/* platform-dependent.							*/
#if defined(SUNOS4) || defined(FREEBSD)
    typedef void (* SIG_PF)();
#endif
#if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
    || defined(MACOSX) || defined(HURD)
# ifdef __STDC__
    typedef void (* SIG_PF)(int);
# else
    typedef void (* SIG_PF)();
# endif
#endif
#if defined(MSWIN32)
    /* Win32 uses an unhandled-exception filter instead of a signal	*/
    /* handler; SIG_DFL is redefined as a sentinel filter value.	*/
    typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
#   undef SIG_DFL
#   define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
#endif
#if defined(MSWINCE)
    typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
#   undef SIG_DFL
#   define SIG_DFL (SIG_PF) (-1)
#endif
1989 | |
/* REAL_SIG_PF is the full platform-specific signature with which the	*/
/* kernel actually invokes our write fault handler.			*/
#if defined(IRIX5) || defined(OSF1) || defined(HURD)
    typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
#endif
#if defined(SUNOS5SIGS)
# ifdef HPUX
#   define SIGINFO __siginfo
# else
#   define SIGINFO siginfo
# endif
# ifdef __STDC__
    typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
# else
    typedef void (* REAL_SIG_PF)();
# endif
#endif
#if defined(LINUX)
    /* s_c is the signal-context type passed to the handler; the	*/
    /* struct tag changed from sigcontext_struct to sigcontext across	*/
    /* kernel/glibc versions, hence the version checks below.		*/
# if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
    typedef struct sigcontext s_c;
# else  /* glibc < 2.2 */
#   include <linux/version.h>
#   if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
      typedef struct sigcontext s_c;
#   else
      typedef struct sigcontext_struct s_c;
#   endif
# endif  /* glibc < 2.2 */
# if defined(ALPHA) || defined(M68K)
    typedef void (* REAL_SIG_PF)(int, int, s_c *);
# else
#   if defined(IA64) || defined(HP_PA)
      typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
#   else
      /* NOTE(review): context passed by value here -- presumably the	*/
      /* i386-style convention where the kernel pushes it on the stack;	*/
      /* confirm for any newly added architecture.			*/
      typedef void (* REAL_SIG_PF)(int, s_c);
#   endif
# endif
# ifdef ALPHA
    /* Retrieve fault address from sigcontext structure by decoding	*/
    /* instruction.  Alpha delivers only the faulting PC, so we decode	*/
    /* the memory-access instruction found there: base register is in	*/
    /* bits 16-20; the 16-bit displacement is sign-extended by the	*/
    /* shift pair below.						*/
    char * get_fault_addr(s_c *sc) {
        unsigned instr;
	word faultaddr;

	instr = *((unsigned *)(sc->sc_pc));
	faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
	faultaddr += (word) (((int)instr << 16) >> 16);
	return (char *)faultaddr;
    }
# endif /* ALPHA */
#endif
2039 | |
# if defined(MACOSX) /* Should also test for PowerPC? */
    typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);

/* Decodes the machine instruction which was responsible for the sending of the
   SIGBUS signal. Sadly this is the only way to find the faulting address because
   the signal handler doesn't get it directly from the kernel (although it is
   available on the Mach level, but dropped by the BSD personality before it
   calls our signal handler...)
   This code should be able to deal correctly with all PPCs starting from the
   601 up to and including the G4s (including Velocity Engine). */
/* Field extraction from a 32-bit PPC instruction word:			*/
/*   OP1: primary opcode (bits 0-5), OP2: extended opcode for the	*/
/*   primary-opcode-31 group, REGA/REGB/REGC: register fields.		*/
/* NOTE(review): EXTRACT_DISP reads the low halfword through a short	*/
/* pointer, which assumes big-endian layout -- true on PPC MacOS X.	*/
#define EXTRACT_OP1(iw)     (((iw) & 0xFC000000) >> 26)
#define EXTRACT_OP2(iw)     (((iw) & 0x000007FE) >> 1)
#define EXTRACT_REGA(iw)    (((iw) & 0x001F0000) >> 16)
#define EXTRACT_REGB(iw)    (((iw) & 0x03E00000) >> 21)
#define EXTRACT_REGC(iw)    (((iw) & 0x0000F800) >> 11)
#define EXTRACT_DISP(iw)    ((short *) &(iw))[1]

/* Returns the effective address of the faulting store (or cache	*/
/* operation), or NULL if the instruction at scp->sc_ir is not one we	*/
/* recognize as writing to memory.  The address is masked down to the	*/
/* operand's natural alignment for the Altivec store forms, mirroring	*/
/* what the hardware does.						*/
static char *get_fault_addr(struct sigcontext *scp)
{
   unsigned int   instr = *((unsigned int *) scp->sc_ir);
   unsigned int * regs = &((unsigned int *) scp->sc_regs)[2];
   int            disp = 0, tmp;
   unsigned int   baseA = 0, baseB = 0;
   unsigned int   addr, alignmask = 0xFFFFFFFF;

#ifdef GC_DEBUG_DECODER
   GC_err_printf1("Instruction: 0x%lx\n", instr);
   /* FIXED: format string previously read "Opcode 1: d\n" (missing %). */
   GC_err_printf1("Opcode 1: %d\n", (int)EXTRACT_OP1(instr));
#endif
   switch(EXTRACT_OP1(instr)) {
      case 38:   /* stb */
      case 39:   /* stbu */
      case 54:   /* stfd */
      case 55:   /* stfdu */
      case 52:   /* stfs */
      case 53:   /* stfsu */
      case 44:   /* sth */
      case 45:   /* sthu */
      case 47:   /* stmw */
      case 36:   /* stw */
      case 37:   /* stwu */
         /* D-form store: EA = (rA|0) + sign-extended displacement.	*/
         tmp = EXTRACT_REGA(instr);
         if(tmp > 0)
            baseA = regs[tmp];
         disp = EXTRACT_DISP(instr);
         break;
      case 31:
#ifdef GC_DEBUG_DECODER
         GC_err_printf1("Opcode 2: %d\n", (int)EXTRACT_OP2(instr));
#endif
         switch(EXTRACT_OP2(instr)) {
            case 86:    /* dcbf */
            case 54:    /* dcbst */
            case 1014:  /* dcbz */
            case 247:   /* stbux */
            case 215:   /* stbx */
            case 759:   /* stfdux */
            case 727:   /* stfdx */
            case 983:   /* stfiwx */
            case 695:   /* stfsux */
            case 663:   /* stfsx */
            case 918:   /* sthbrx */
            case 439:   /* sthux */
            case 407:   /* sthx */
            case 661:   /* stswx */
            case 662:   /* stwbrx */
            case 150:   /* stwcx. */
            case 183:   /* stwux */
            case 151:   /* stwx */
            case 135:   /* stvebx */
            case 167:   /* stvehx */
            case 199:   /* stvewx */
            case 231:   /* stvx */
            case 487:   /* stvxl */
               /* X-form store: EA = (rA|0) + rB.			*/
               tmp = EXTRACT_REGA(instr);
               if(tmp > 0)
                  baseA = regs[tmp];
               baseB = regs[EXTRACT_REGC(instr)];
               /* determine Altivec alignment mask */
               switch(EXTRACT_OP2(instr)) {
                  case 167:   /* stvehx */
                     alignmask = 0xFFFFFFFE;
                     break;
                  case 199:   /* stvewx */
                     alignmask = 0xFFFFFFFC;
                     break;
                  case 231:   /* stvx */
                     alignmask = 0xFFFFFFF0;
                     break;
                  case 487:  /* stvxl */
                     alignmask = 0xFFFFFFF0;
                     break;
               }
               break;
            case 725:   /* stswi */
               tmp = EXTRACT_REGA(instr);
               if(tmp > 0)
                  baseA = regs[tmp];
               break;
            default:   /* ignore instruction */
#ifdef GC_DEBUG_DECODER
               /* FIXED: was bare GC_err_printf; this file's zero-arg	*/
               /* diagnostic is GC_err_printf0.				*/
               GC_err_printf0("Ignored by inner handler\n");
#endif
               return NULL;
               break;
         }
         break;
      default:   /* ignore instruction */
#ifdef GC_DEBUG_DECODER
         GC_err_printf0("Ignored by main handler\n");
#endif
         return NULL;
         break;
   }

   addr = (baseA + baseB) + disp;
   addr &= alignmask;
#ifdef GC_DEBUG_DECODER
   GC_err_printf1("BaseA: %d\n", baseA);
   GC_err_printf1("BaseB: %d\n", baseB);
   GC_err_printf1("Disp:  %d\n", disp);
   GC_err_printf1("Address: %d\n", addr);
#endif
   return (char *)addr;
}
#endif /* MACOSX */
2166 | |
/* Handlers installed before ours; we chain to them for faults that	*/
/* are not within the GC heap.						*/
SIG_PF GC_old_bus_handler;
SIG_PF GC_old_segv_handler;	/* Also old MSWIN32 ACCESS_VIOLATION filter */
2169 | |
#ifdef THREADS
/* We need to lock around the bitmap update in the write fault handler	*/
/* in order to avoid the risk of losing a bit.  We do this with a 	*/
/* test-and-set spin lock if we know how to do that.  Otherwise we	*/
/* check whether we are already in the handler and use the dumb but	*/
/* safe fallback algorithm of setting all bits in the word.		*/
/* Contention should be very rare, so we do the minimum to handle it	*/
/* correctly.								*/
#ifdef GC_TEST_AND_SET_DEFINED
  static VOLATILE unsigned int fault_handler_lock = 0;
  /* Atomically (w.r.t. concurrent fault handlers) set the page-hash	*/
  /* entry for index in db.  Called from the write fault handler, so	*/
  /* only async-signal-safe operations may be used here.		*/
  void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
    while (GC_test_and_set(&fault_handler_lock)) {}
	/* Could also revert to set_pht_entry_from_index_safe if initial	*/
	/* GC_test_and_set fails.						*/
    set_pht_entry_from_index(db, index);
    GC_clear(&fault_handler_lock);
  }
#else /* !GC_TEST_AND_SET_DEFINED */
  /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong,	*/
  /* just before we notice the conflict and correct it. We may end up   */
  /* looking at it while it's wrong.  But this requires contention	*/
  /* exactly when a GC is triggered, which seems far less likely to	*/
  /* fail than the old code, which had no reported failures.  Thus we	*/
  /* leave it this way while we think of something better, or support	*/
  /* GC_test_and_set on the remaining platforms.			*/
  static VOLATILE word currently_updating = 0;
  void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
    unsigned int update_dummy;
    currently_updating = (word)(&update_dummy);
    set_pht_entry_from_index(db, index);
    /* If we get contention in the 10 or so instruction window here,	*/
    /* and we get stopped by a GC between the two updates, we lose!	*/
    if (currently_updating != (word)(&update_dummy)) {
	set_pht_entry_from_index_safe(db, index);
	/* We claim that if two threads concurrently try to update the	*/
	/* dirty bit vector, the first one to execute UPDATE_START 	*/
	/* will see it changed when UPDATE_END is executed.  (Note that	*/
	/* &update_dummy must differ in two distinct threads.)  It	*/
	/* will then execute set_pht_entry_from_index_safe, thus 	*/
	/* returning us to a safe state, though not soon enough.	*/
    }
  }
#endif /* !GC_TEST_AND_SET_DEFINED */
#else /* !THREADS */
/* Single-threaded: no locking needed around the bitmap update.		*/
# define async_set_pht_entry_from_index(db, index) \
	set_pht_entry_from_index(db, index)
#endif /* !THREADS */
2217 | |
/*
 * The write fault handler for MPROTECT_VDB (installed by GC_dirty_init).
 * If the fault lies within an allocated GC heap block, it records the
 * affected page(s) in GC_dirty_pages, unprotects the page and resumes
 * the faulting write.  Otherwise it chains to the previously installed
 * handler, or aborts if there was none.  The argument list and the way
 * the fault address is recovered are highly platform-dependent;
 * SIG_OK/CODE_OK encapsulate the per-platform check that this really is
 * a protection fault that our page protection could have caused.
 */
/*ARGSUSED*/
# if defined (SUNOS4) || defined(FREEBSD)
    void GC_write_fault_handler(sig, code, scp, addr)
    int sig, code;
    struct sigcontext *scp;
    char * addr;
#   ifdef SUNOS4
#     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
#     define CODE_OK (FC_CODE(code) == FC_PROT \
		    || (FC_CODE(code) == FC_OBJERR \
		       && FC_ERRNO(code) == FC_PROT))
#   endif
#   ifdef FREEBSD
#     define SIG_OK (sig == SIGBUS)
#     define CODE_OK (code == BUS_PAGE_FAULT)
#   endif
# endif
# if defined(IRIX5) || defined(OSF1) || defined(HURD)
#   include <errno.h>
    void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
#   ifdef OSF1
#     define SIG_OK (sig == SIGSEGV)
#     define CODE_OK (code == 2 /* experimentally determined */)
#   endif
#   ifdef IRIX5
#     define SIG_OK (sig == SIGSEGV)
#     define CODE_OK (code == EACCES)
#   endif
#   ifdef HURD
#     define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
#     define CODE_OK TRUE
#   endif
# endif
# if defined(LINUX)
#   if defined(ALPHA) || defined(M68K)
      void GC_write_fault_handler(int sig, int code, s_c * sc)
#   else
#     if defined(IA64) || defined(HP_PA)
        void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
#     else
#       if defined(ARM32)
          void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
#       else
          void GC_write_fault_handler(int sig, s_c sc)
#       endif
#     endif
#   endif
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK TRUE
	/* Empirically c.trapno == 14, on IA32, but is that useful?	*/
	/* Should probably consider alignment issues on other 		*/
	/* architectures.						*/
# endif
# if defined(SUNOS5SIGS)
#  ifdef __STDC__
    void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
#  else
    void GC_write_fault_handler(sig, scp, context)
    int sig;
    struct SIGINFO *scp;
    void * context;
#  endif
#   ifdef HPUX
#     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
#     define CODE_OK (scp -> si_code == SEGV_ACCERR) \
		     || (scp -> si_code == BUS_ADRERR) \
		     || (scp -> si_code == BUS_UNKNOWN) \
		     || (scp -> si_code == SEGV_UNKNOWN) \
		     || (scp -> si_code == BUS_OBJERR)
#   else
#     define SIG_OK (sig == SIGSEGV)
#     define CODE_OK (scp -> si_code == SEGV_ACCERR)
#   endif
# endif

# if defined(MACOSX)
    void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
#   define SIG_OK (sig == SIGBUS)
#   define CODE_OK (code == 0 /* experimentally determined */)
# endif

# if defined(MSWIN32) || defined(MSWINCE)
    LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
#   define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
			STATUS_ACCESS_VIOLATION)
#   define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
			/* Write fault */
# endif
{
    register unsigned i;
    /* Recover the faulting data address, per platform.			*/
#   if defined(HURD)
	char *addr = (char *) code;
#   endif
#   ifdef IRIX5
	char * addr = (char *) (size_t) (scp -> sc_badvaddr);
#   endif
#   if defined(OSF1) && defined(ALPHA)
	char * addr = (char *) (scp -> sc_traparg_a0);
#   endif
#   ifdef SUNOS5SIGS
	char * addr = (char *) (scp -> si_addr);
#   endif
#   ifdef LINUX
#     if defined(I386) || defined (X86_64)
	char * addr = (char *) (sc.cr2);
#     else
#	if defined(M68K)
	  char * addr = NULL;

	  struct sigcontext *scp = (struct sigcontext *)(sc);

	  /* m68k: decode the exception stack frame (format in bits	*/
	  /* 12-15 of sc_formatvec) to find the effective address.	*/
	  int format = (scp->sc_formatvec >> 12) & 0xf;
	  unsigned long *framedata = (unsigned long *)(scp + 1);
	  unsigned long ea;

	  if (format == 0xa || format == 0xb) {
	  	/* 68020/030 */
	  	ea = framedata[2];
	  } else if (format == 7) {
	  	/* 68040 */
	  	ea = framedata[3];
	  	if (framedata[1] & 0x08000000) {
	  		/* correct addr on misaligned access */
	  		ea = (ea+4095)&(~4095);
		}
	  } else if (format == 4) {
	  	/* 68060 */
	  	ea = framedata[0];
	  	if (framedata[1] & 0x08000000) {
	  		/* correct addr on misaligned access */
	  		ea = (ea+4095)&(~4095);
	  	}
	  }
	  addr = (char *)ea;
#	else
#	  ifdef ALPHA
            char * addr = get_fault_addr(sc);
#	  else
#	    if defined(IA64) || defined(HP_PA)
	      char * addr = si -> si_addr;
		/* I believe this is claimed to work on all platforms for */
		/* Linux 2.3.47 and later.  Hopefully we don't have to	*/
		/* worry about earlier kernels on IA64.			*/
#	    else
#             if defined(POWERPC)
                char * addr = (char *) (sc.regs->dar);
#	      else
#		if defined(ARM32)
                  char * addr = (char *)sc.fault_address;
#		else
		  --> architecture not supported
#		endif
#	      endif
#	    endif
#	  endif
#	endif
#     endif
#   endif
#   if defined(MACOSX)
        char * addr = get_fault_addr(scp);
#   endif
#   if defined(MSWIN32) || defined(MSWINCE)
	char * addr = (char *) (exc_info -> ExceptionRecord
				-> ExceptionInformation[1]);
#	define sig SIGSEGV
#   endif

    if (SIG_OK && CODE_OK) {
        register struct hblk * h =
			(struct hblk *)((word)addr & ~(GC_page_size-1));
        GC_bool in_allocd_block;

#	ifdef SUNOS5SIGS
	    /* Address is only within the correct physical page.	*/
	    in_allocd_block = FALSE;
            for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
              if (HDR(h+i) != 0) {
                in_allocd_block = TRUE;
              }
            }
#	else
	    in_allocd_block = (HDR(addr) != 0);
#	endif
        if (!in_allocd_block) {
	    /* Not our fault: chain to the previous handler, if any.	*/
	    /* Heap blocks now begin and end on page boundaries */
            SIG_PF old_handler;

            if (sig == SIGSEGV) {
            	old_handler = GC_old_segv_handler;
            } else {
                old_handler = GC_old_bus_handler;
            }
            if (old_handler == SIG_DFL) {
#		if !defined(MSWIN32) && !defined(MSWINCE)
		    GC_err_printf1("Segfault at 0x%lx\n", addr);
                    ABORT("Unexpected bus error or segmentation fault");
#		else
		    return(EXCEPTION_CONTINUE_SEARCH);
#		endif
            } else {
#		if defined (SUNOS4) || defined(FREEBSD)
		    (*old_handler) (sig, code, scp, addr);
		    return;
#		endif
#		if defined (SUNOS5SIGS)
                    (*(REAL_SIG_PF)old_handler) (sig, scp, context);
		    return;
#		endif
#		if defined (LINUX)
#		    if defined(ALPHA) || defined(M68K)
		        (*(REAL_SIG_PF)old_handler) (sig, code, sc);
#		    else
#		      if defined(IA64) || defined(HP_PA)
		        (*(REAL_SIG_PF)old_handler) (sig, si, scp);
#		      else
		        (*(REAL_SIG_PF)old_handler) (sig, sc);
#		      endif
#		    endif
		    return;
#		endif
#		if defined (IRIX5) || defined(OSF1) || defined(HURD)
		    (*(REAL_SIG_PF)old_handler) (sig, code, scp);
		    return;
#		endif
#		ifdef MACOSX
		    /* NOTE(review): unlike the other cases there is no	*/
		    /* return here, so after chaining we fall through	*/
		    /* and also unprotect/record the page -- confirm	*/
		    /* this is intended.				*/
		    (*(REAL_SIG_PF)old_handler) (sig, code, scp);
#		endif
#		ifdef MSWIN32
		    return((*old_handler)(exc_info));
#		endif
            }
        }
        UNPROTECT(h, GC_page_size);
	/* We need to make sure that no collection occurs between	*/
	/* the UNPROTECT and the setting of the dirty bit.  Otherwise	*/
	/* a write by a third thread might go unnoticed.  Reversing	*/
	/* the order is just as bad, since we would end up unprotecting	*/
	/* a page in a GC cycle during which it's not marked.		*/
	/* Currently we do this by disabling the thread stopping	*/
	/* signals while this handler is running.  An alternative might	*/
	/* be to record the fact that we're about to unprotect, or	*/
	/* have just unprotected a page in the GC's thread structure,	*/
	/* and then to have the thread stopping code set the dirty	*/
	/* flag, if necessary.						*/
        for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            register int index = PHT_HASH(h+i);

            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
#	if defined(OSF1)
	    /* These reset the signal handler each time by default. */
	    signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
#	endif
    	/* The write may not take place before dirty bits are read.	*/
    	/* But then we'll fault again ...				*/
#	if defined(MSWIN32) || defined(MSWINCE)
	    return(EXCEPTION_CONTINUE_EXECUTION);
#	else
	    return;
#	endif
    }
#if defined(MSWIN32) || defined(MSWINCE)
    return EXCEPTION_CONTINUE_SEARCH;
#else
    GC_err_printf1("Segfault at 0x%lx\n", addr);
    ABORT("Unexpected bus error or segmentation fault");
#endif
}
2486 | |
2487 /* | |
2488 * We hold the allocation lock. We expect block h to be written | |
2489 * shortly. Ensure that all pages containing any part of the n hblks | |
2490 * starting at h are no longer protected. If is_ptrfree is false, | |
2491 * also ensure that they will subsequently appear to be dirty. | |
2492 */ | |
2493 void GC_remove_protection(h, nblocks, is_ptrfree) | |
2494 struct hblk *h; | |
2495 word nblocks; | |
2496 GC_bool is_ptrfree; | |
2497 { | |
2498 struct hblk * h_trunc; /* Truncated to page boundary */ | |
2499 struct hblk * h_end; /* Page boundary following block end */ | |
2500 struct hblk * current; | |
2501 GC_bool found_clean; | |
2502 | |
2503 if (!GC_dirty_maintained) return; | |
2504 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1)); | |
2505 h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1) | |
2506 & ~(GC_page_size-1)); | |
2507 found_clean = FALSE; | |
2508 for (current = h_trunc; current < h_end; ++current) { | |
2509 int index = PHT_HASH(current); | |
2510 | |
2511 if (!is_ptrfree || current < h || current >= h + nblocks) { | |
2512 async_set_pht_entry_from_index(GC_dirty_pages, index); | |
2513 } | |
2514 } | |
2515 UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc); | |
2516 } | |
2517 | |
2518 void GC_dirty_init() | |
2519 { | |
2520 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \ | |
2521 defined(OSF1) || defined(HURD) | |
2522 struct sigaction act, oldact; | |
2523 /* We should probably specify SA_SIGINFO for Linux, and handle */ | |
2524 /* the different architectures more uniformly. */ | |
2525 # if defined(IRIX5) || defined(LINUX) || defined(OSF1) || defined(HURD) | |
2526 act.sa_flags = SA_RESTART; | |
2527 act.sa_handler = (SIG_PF)GC_write_fault_handler; | |
2528 # else | |
2529 act.sa_flags = SA_RESTART | SA_SIGINFO; | |
2530 act.sa_sigaction = GC_write_fault_handler; | |
2531 # endif | |
2532 (void)sigemptyset(&act.sa_mask); | |
2533 # ifdef SIG_SUSPEND | |
2534 /* Arrange to postpone SIG_SUSPEND while we're in a write fault */ | |
2535 /* handler. This effectively makes the handler atomic w.r.t. */ | |
2536 /* stopping the world for GC. */ | |
2537 (void)sigaddset(&act.sa_mask, SIG_SUSPEND); | |
2538 # endif /* SIG_SUSPEND */ | |
2539 # endif | |
2540 # if defined(MACOSX) | |
2541 struct sigaction act, oldact; | |
2542 | |
2543 act.sa_flags = SA_RESTART; | |
2544 act.sa_handler = GC_write_fault_handler; | |
2545 sigemptyset(&act.sa_mask); | |
2546 # endif | |
2547 # ifdef PRINTSTATS | |
2548 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n"); | |
2549 # endif | |
2550 GC_dirty_maintained = TRUE; | |
2551 if (GC_page_size % HBLKSIZE != 0) { | |
2552 GC_err_printf0("Page size not multiple of HBLKSIZE\n"); | |
2553 ABORT("Page size not multiple of HBLKSIZE"); | |
2554 } | |
2555 # if defined(SUNOS4) || defined(FREEBSD) | |
2556 GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler); | |
2557 if (GC_old_bus_handler == SIG_IGN) { | |
2558 GC_err_printf0("Previously ignored bus error!?"); | |
2559 GC_old_bus_handler = SIG_DFL; | |
2560 } | |
2561 if (GC_old_bus_handler != SIG_DFL) { | |
2562 # ifdef PRINTSTATS | |
2563 GC_err_printf0("Replaced other SIGBUS handler\n"); | |
2564 # endif | |
2565 } | |
2566 # endif | |
2567 # if defined(SUNOS4) | |
2568 GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler); | |
2569 if (GC_old_segv_handler == SIG_IGN) { | |
2570 GC_err_printf0("Previously ignored segmentation violation!?"); | |
2571 GC_old_segv_handler = SIG_DFL; | |
2572 } | |
2573 if (GC_old_segv_handler != SIG_DFL) { | |
2574 # ifdef PRINTSTATS | |
2575 GC_err_printf0("Replaced other SIGSEGV handler\n"); | |
2576 # endif | |
2577 } | |
2578 # endif | |
2579 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) \ | |
2580 || defined(OSF1) || defined(HURD) | |
2581 /* SUNOS5SIGS includes HPUX */ | |
2582 # if defined(GC_IRIX_THREADS) | |
2583 sigaction(SIGSEGV, 0, &oldact); | |
2584 sigaction(SIGSEGV, &act, 0); | |
2585 # else | |
2586 sigaction(SIGSEGV, &act, &oldact); | |
2587 # endif | |
2588 # if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO) | |
2589 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */ | |
2590 /* sa_sigaction. */ | |
2591 GC_old_segv_handler = oldact.sa_handler; | |
2592 # else /* Irix 6.x or SUNOS5SIGS or LINUX */ | |
2593 if (oldact.sa_flags & SA_SIGINFO) { | |
2594 GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction); | |
2595 } else { | |
2596 GC_old_segv_handler = oldact.sa_handler; | |
2597 } | |
2598 # endif | |
2599 if (GC_old_segv_handler == SIG_IGN) { | |
2600 GC_err_printf0("Previously ignored segmentation violation!?"); | |
2601 GC_old_segv_handler = SIG_DFL; | |
2602 } | |
2603 if (GC_old_segv_handler != SIG_DFL) { | |
2604 # ifdef PRINTSTATS | |
2605 GC_err_printf0("Replaced other SIGSEGV handler\n"); | |
2606 # endif | |
2607 } | |
2608 # endif | |
2609 # if defined(MACOSX) || defined(HPUX) || defined(LINUX) || defined(HURD) | |
2610 sigaction(SIGBUS, &act, &oldact); | |
2611 GC_old_bus_handler = oldact.sa_handler; | |
2612 if (GC_old_bus_handler == SIG_IGN) { | |
2613 GC_err_printf0("Previously ignored bus error!?"); | |
2614 GC_old_bus_handler = SIG_DFL; | |
2615 } | |
2616 if (GC_old_bus_handler != SIG_DFL) { | |
2617 # ifdef PRINTSTATS | |
2618 GC_err_printf0("Replaced other SIGBUS handler\n"); | |
2619 # endif | |
2620 } | |
2621 # endif /* MACOS || HPUX || LINUX */ | |
2622 # if defined(MSWIN32) | |
2623 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler); | |
2624 if (GC_old_segv_handler != NULL) { | |
2625 # ifdef PRINTSTATS | |
2626 GC_err_printf0("Replaced other UnhandledExceptionFilter\n"); | |
2627 # endif | |
2628 } else { | |
2629 GC_old_segv_handler = SIG_DFL; | |
2630 } | |
2631 # endif | |
2632 } | |
2633 | |
2634 int GC_incremental_protection_needs() | |
2635 { | |
2636 if (GC_page_size == HBLKSIZE) { | |
2637 return GC_PROTECTS_POINTER_HEAP; | |
2638 } else { | |
2639 return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP; | |
2640 } | |
2641 } | |
2642 | |
2643 #define HAVE_INCREMENTAL_PROTECTION_NEEDS | |
2644 | |
/* Does this block header describe a pointer-free object kind?		*/
#define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)

/* Is x aligned on a protection-page boundary?				*/
#define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))

/* Write-protect the heap.  If pointer-free blocks may share pages	*/
/* with pointer-containing ones (protect_all), protect every section	*/
/* wholesale; otherwise walk the blocks and protect only maximal runs	*/
/* of pointer-containing blocks, leaving pointer-free and free blocks	*/
/* writable.								*/
void GC_protect_heap()
{
    ptr_t start;
    word len;
    struct hblk * current;
    struct hblk * current_start;  /* Start of block to be protected. */
    struct hblk * limit;
    unsigned i;
    GC_bool protect_all = 
	  (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
    for (i = 0; i < GC_n_heap_sects; i++) {
        start = GC_heap_sects[i].hs_start;
        len = GC_heap_sects[i].hs_bytes;
	if (protect_all) {
          PROTECT(start, len);
	} else {
	  GC_ASSERT(PAGE_ALIGNED(len))
	  GC_ASSERT(PAGE_ALIGNED(start))
	  current_start = current = (struct hblk *)start;
	  limit = (struct hblk *)(start + len);
	  while (current < limit) {
            hdr * hhdr;
	    word nhblks;
	    GC_bool is_ptrfree;

	    GC_ASSERT(PAGE_ALIGNED(current));
	    GET_HDR(current, hhdr);
	    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
	      /* This can happen only if we're at the beginning of a 	*/
	      /* heap segment, and a block spans heap segments.		*/
	      /* We will handle that block as part of the preceding	*/
	      /* segment.						*/
	      GC_ASSERT(current_start == current);
	      current_start = ++current;
	      continue;
	    }
	    if (HBLK_IS_FREE(hhdr)) {
	      GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
	      nhblks = divHBLKSZ(hhdr -> hb_sz);
	      is_ptrfree = TRUE;	/* dirty on alloc */
	    } else {
	      nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
	      is_ptrfree = IS_PTRFREE(hhdr);
	    }
	    if (is_ptrfree) {
	      /* Close out and protect the pending run, if any, then	*/
	      /* skip this writable block.				*/
	      if (current_start < current) {
		PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
	      }
	      current_start = (current += nhblks);
	    } else {
	      current += nhblks;
	    }
	  }
	  if (current_start < current) {
	    PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
	  }
	}
    }
}
2707 | |
/* We assume that either the world is stopped or it's OK to lose dirty	*/
/* bits while this is happening (as in GC_enable_incremental).		*/
/* Snapshot the dirty-page table for the collector's use, clear it,	*/
/* and re-protect the heap so subsequent writes are caught again.	*/
void GC_read_dirty()
{
    BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
          (sizeof GC_dirty_pages));
    BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
    GC_protect_heap();
}
2717 | |
2718 GC_bool GC_page_was_dirty(h) | |
2719 struct hblk * h; | |
2720 { | |
2721 register word index = PHT_HASH(h); | |
2722 | |
2723 return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index)); | |
2724 } | |
2725 | |
2726 /* | |
2727 * Acquiring the allocation lock here is dangerous, since this | |
2728 * can be called from within GC_call_with_alloc_lock, and the cord | |
2729 * package does so. On systems that allow nested lock acquisition, this | |
2730 * happens to work. | |
2731 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined. | |
2732 */ | |
2733 | |
2734 static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */ | |
2735 | |
2736 void GC_begin_syscall() | |
2737 { | |
2738 if (!I_HOLD_LOCK()) { | |
2739 LOCK(); | |
2740 syscall_acquired_lock = TRUE; | |
2741 } | |
2742 } | |
2743 | |
2744 void GC_end_syscall() | |
2745 { | |
2746 if (syscall_acquired_lock) { | |
2747 syscall_acquired_lock = FALSE; | |
2748 UNLOCK(); | |
2749 } | |
2750 } | |
2751 | |
/* Re-enable writes to the page-aligned range covering			*/
/* [addr, addr+len), first marking the affected pages dirty so no	*/
/* updates are lost.  Needed before a system call writes into the	*/
/* heap, since the kernel typically fails (rather than faults) on a	*/
/* write-protected buffer.  The range must lie within one object.	*/
void GC_unprotect_range(addr, len)
ptr_t addr;
word len;
{
    struct hblk * start_block;
    struct hblk * end_block;
    register struct hblk *h;
    ptr_t obj_start;
    
    if (!GC_dirty_maintained) return;
    obj_start = GC_base(addr);
    if (obj_start == 0) return;
    if (GC_base(addr + len - 1) != obj_start) {
    	ABORT("GC_unprotect_range(range bigger than object)");
    }
    /* Round the start down, and the end up, to page boundaries.	*/
    start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
    end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
    end_block += GC_page_size/HBLKSIZE - 1;
    for (h = start_block; h <= end_block; h++) {
        register word index = PHT_HASH(h);
        
        /* async_ variant: we may be running outside any		*/
        /* world-stopped section here.					*/
        async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
    UNPROTECT(start_block,
    	      ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
}
2778 | |
#if 0
/* NOTE: this entire region is compiled out; it remains only as sample	*/
/* code for clients who want to wrap heap-writing system calls.		*/

/* We no longer wrap read by default, since that was causing too many	*/
/* problems.  It is preferred that the client instead avoids writing	*/
/* to the write-protected heap with a system call.			*/
/* This still serves as sample code if you do want to wrap system calls.*/

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
/* Replacement for UNIX system call.					  */
/* Other calls that write to the heap should be handled similarly.	  */
/* Note that this doesn't work well for blocking reads:  It will hold	  */
/* the allocation lock for the entire duration of the call. Multithreaded */
/* clients should really ensure that it won't block, either by setting 	  */
/* the descriptor nonblocking, or by calling select or poll first, to	  */
/* make sure that input is available.					  */
/* Another, preferred alternative is to ensure that system calls never 	  */
/* write to the protected heap (see above).				  */
# if defined(__STDC__) && !defined(SUNOS4)
#   include <unistd.h>
#   include <sys/uio.h>
    ssize_t read(int fd, void *buf, size_t nbyte)
# else
#   ifndef LINT
      int read(fd, buf, nbyte)
#   else
      int GC_read(fd, buf, nbyte)
#   endif
    int fd;
    char *buf;
    int nbyte;
# endif
{
    int result;
    
    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
#   if defined(IRIX5) || defined(GC_LINUX_THREADS)
	/* Indirect system call may not always be easily available.	*/
	/* We could call _read, but that would interfere with the	*/
	/* libpthread interception of read.				*/
	/* On Linux, we have to be careful with the linuxthreads	*/
	/* read interception.						*/
	{
	    struct iovec iov;

	    iov.iov_base = buf;
	    iov.iov_len = nbyte;
	    result = readv(fd, &iov, 1);
	}
#   else
#     if defined(HURD)	
	result = __read(fd, buf, nbyte);
#     else
 	/* The two zero args at the end of this list are because one
 	   IA-64 syscall() implementation actually requires six args
 	   to be passed, even though they aren't always used. */
     	result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
#     endif /* !HURD */
#   endif
    GC_end_syscall();
    return(result);
}
#endif /* !MSWIN32 && !MSWINCE && !GC_USE_LD_WRAP */

#if defined(GC_USE_LD_WRAP) && !defined(THREADS)
    /* We use the GNU ld call wrapping facility.			*/
    /* This requires that the linker be invoked with "--wrap read".	*/
    /* This can be done by passing -Wl,"--wrap read" to gcc.		*/
    /* I'm not sure that this actually wraps whatever version of read	*/
    /* is called by stdio.  That code also mentions __read.		*/
#   include <unistd.h>
    ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
    {
 	int result;

	GC_begin_syscall();
	GC_unprotect_range(buf, (word)nbyte);
	result = __real_read(fd, buf, nbyte);
	GC_end_syscall();
	return(result);
    }

    /* We should probably also do this for __read, or whatever stdio	*/
    /* actually calls.							*/
#endif

#endif /* 0 */
2866 | |
/*ARGSUSED*/
/* The mprotect implementation keeps no "ever written" history, so	*/
/* answer conservatively.						*/
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    return(TRUE);
}
2873 | |
/* Reset the n pages starting at h to "was never dirty" status.	*/
/* No-op here: the mprotect implementation keeps no per-page history	*/
/* (see GC_page_was_ever_dirty above), so there is nothing to reset.	*/
/*ARGSUSED*/
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
}
2881 | |
2882 # else /* !MPROTECT_VDB */ | |
2883 | |
# ifdef GC_USE_LD_WRAP
    /* Pass-through wrapper: without MPROTECT_VDB there is no need to	*/
    /* unprotect the buffer, but the --wrap symbol must still resolve.	*/
    ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
    { return __real_read(fd, buf, nbyte); }
# endif
2888 | |
2889 # endif /* MPROTECT_VDB */ | |
2890 | |
2891 # ifdef PROC_VDB | |
2892 | |
2893 /* | |
2894 * See DEFAULT_VDB for interface descriptions. | |
2895 */ | |
2896 | |
2897 /* | |
2898 * This implementaion assumes a Solaris 2.X like /proc pseudo-file-system | |
2899 * from which we can read page modified bits. This facility is far from | |
2900 * optimal (e.g. we would like to get the info for only some of the | |
2901 * address space), but it avoids intercepting system calls. | |
2902 */ | |
2903 | |
2904 #include <errno.h> | |
2905 #include <sys/types.h> | |
2906 #include <sys/signal.h> | |
2907 #include <sys/fault.h> | |
2908 #include <sys/syscall.h> | |
2909 #include <sys/procfs.h> | |
2910 #include <sys/stat.h> | |
2911 | |
2912 #define INITIAL_BUF_SZ 4096 | |
2913 word GC_proc_buf_size = INITIAL_BUF_SZ; | |
2914 char *GC_proc_buf; | |
2915 | |
2916 #ifdef GC_SOLARIS_THREADS | |
2917 /* We don't have exact sp values for threads. So we count on */ | |
2918 /* occasionally declaring stack pages to be fresh. Thus we */ | |
2919 /* need a real implementation of GC_is_fresh. We can't clear */ | |
2920 /* entries in GC_written_pages, since that would declare all */ | |
2921 /* pages with the given hash address to be fresh. */ | |
2922 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */ | |
2923 struct hblk ** GC_fresh_pages; /* A direct mapped cache. */ | |
2924 /* Collisions are dropped. */ | |
2925 | |
2926 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1)) | |
2927 # define ADD_FRESH_PAGE(h) \ | |
2928 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h) | |
2929 # define PAGE_IS_FRESH(h) \ | |
2930 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0) | |
2931 #endif | |
2932 | |
2933 /* Add all pages in pht2 to pht1 */ | |
2934 void GC_or_pages(pht1, pht2) | |
2935 page_hash_table pht1, pht2; | |
2936 { | |
2937 register int i; | |
2938 | |
2939 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i]; | |
2940 } | |
2941 | |
2942 int GC_proc_fd; | |
2943 | |
/* Open the Solaris /proc page-data descriptor for this process and	*/
/* allocate the buffers used by GC_read_dirty.  If allocation already	*/
/* occurred, conservatively mark every page as previously written.	*/
void GC_dirty_init()
{
    int fd;
    char buf[30];

    GC_dirty_maintained = TRUE;
    if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
    	register int i;
    
        /* We can no longer tell which existing pages were written,	*/
        /* so assume all of them were.					*/
        for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
#	ifdef PRINTSTATS
	    GC_printf1("Allocated words:%lu:all pages may have been written\n",
	    	       (unsigned long)
	    	       		(GC_words_allocd + GC_words_allocd_before_gc));
#	endif       
    }
    sprintf(buf, "/proc/%d", getpid());
    fd = open(buf, O_RDONLY);
    if (fd < 0) {
    	ABORT("/proc open failed");
    }
    /* PIOCOPENPD yields a separate descriptor for reading page data.	*/
    /* Done via syscall() to bypass any library interception.		*/
    GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
    close(fd);
    syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
    if (GC_proc_fd < 0) {
    	ABORT("/proc ioctl failed");
    }
    GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
#   ifdef GC_SOLARIS_THREADS
	GC_fresh_pages = (struct hblk **)
	  GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
	if (GC_fresh_pages == 0) {
	    GC_err_printf0("No space for fresh pages\n");
	    EXIT();
	}
	BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
#   endif
}
2982 | |
/* Ignore write hints. They don't help us here.	*/
/* The /proc facility observes every write directly, so there is never	*/
/* any protection to remove.						*/
/*ARGSUSED*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
}
2991 | |
2992 #ifdef GC_SOLARIS_THREADS | |
2993 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes) | |
2994 #else | |
2995 # define READ(fd,buf,nbytes) read(fd, buf, nbytes) | |
2996 #endif | |
2997 | |
2998 void GC_read_dirty() | |
2999 { | |
3000 unsigned long ps, np; | |
3001 int nmaps; | |
3002 ptr_t vaddr; | |
3003 struct prasmap * map; | |
3004 char * bufp; | |
3005 ptr_t current_addr, limit; | |
3006 int i; | |
3007 int dummy; | |
3008 | |
3009 BZERO(GC_grungy_pages, (sizeof GC_grungy_pages)); | |
3010 | |
3011 bufp = GC_proc_buf; | |
3012 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) { | |
3013 # ifdef PRINTSTATS | |
3014 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n", | |
3015 GC_proc_buf_size); | |
3016 # endif | |
3017 { | |
3018 /* Retry with larger buffer. */ | |
3019 word new_size = 2 * GC_proc_buf_size; | |
3020 char * new_buf = GC_scratch_alloc(new_size); | |
3021 | |
3022 if (new_buf != 0) { | |
3023 GC_proc_buf = bufp = new_buf; | |
3024 GC_proc_buf_size = new_size; | |
3025 } | |
3026 if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) { | |
3027 WARN("Insufficient space for /proc read\n", 0); | |
3028 /* Punt: */ | |
3029 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table)); | |
3030 memset(GC_written_pages, 0xff, sizeof(page_hash_table)); | |
3031 # ifdef GC_SOLARIS_THREADS | |
3032 BZERO(GC_fresh_pages, | |
3033 MAX_FRESH_PAGES * sizeof (struct hblk *)); | |
3034 # endif | |
3035 return; | |
3036 } | |
3037 } | |
3038 } | |
3039 /* Copy dirty bits into GC_grungy_pages */ | |
3040 nmaps = ((struct prpageheader *)bufp) -> pr_nmap; | |
3041 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n", | |
3042 nmaps, PG_REFERENCED, PG_MODIFIED); */ | |
3043 bufp = bufp + sizeof(struct prpageheader); | |
3044 for (i = 0; i < nmaps; i++) { | |
3045 map = (struct prasmap *)bufp; | |
3046 vaddr = (ptr_t)(map -> pr_vaddr); | |
3047 ps = map -> pr_pagesize; | |
3048 np = map -> pr_npage; | |
3049 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */ | |
3050 limit = vaddr + ps * np; | |
3051 bufp += sizeof (struct prasmap); | |
3052 for (current_addr = vaddr; | |
3053 current_addr < limit; current_addr += ps){ | |
3054 if ((*bufp++) & PG_MODIFIED) { | |
3055 register struct hblk * h = (struct hblk *) current_addr; | |
3056 | |
3057 while ((ptr_t)h < current_addr + ps) { | |
3058 register word index = PHT_HASH(h); | |
3059 | |
3060 set_pht_entry_from_index(GC_grungy_pages, index); | |
3061 # ifdef GC_SOLARIS_THREADS | |
3062 { | |
3063 register int slot = FRESH_PAGE_SLOT(h); | |
3064 | |
3065 if (GC_fresh_pages[slot] == h) { | |
3066 GC_fresh_pages[slot] = 0; | |
3067 } | |
3068 } | |
3069 # endif | |
3070 h++; | |
3071 } | |
3072 } | |
3073 } | |
3074 bufp += sizeof(long) - 1; | |
3075 bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1)); | |
3076 } | |
3077 /* Update GC_written_pages. */ | |
3078 GC_or_pages(GC_written_pages, GC_grungy_pages); | |
3079 # ifdef GC_SOLARIS_THREADS | |
3080 /* Make sure that old stacks are considered completely clean */ | |
3081 /* unless written again. */ | |
3082 GC_old_stacks_are_fresh(); | |
3083 # endif | |
3084 } | |
3085 | |
3086 #undef READ | |
3087 | |
3088 GC_bool GC_page_was_dirty(h) | |
3089 struct hblk *h; | |
3090 { | |
3091 register word index = PHT_HASH(h); | |
3092 register GC_bool result; | |
3093 | |
3094 result = get_pht_entry_from_index(GC_grungy_pages, index); | |
3095 # ifdef GC_SOLARIS_THREADS | |
3096 if (result && PAGE_IS_FRESH(h)) result = FALSE; | |
3097 /* This happens only if page was declared fresh since */ | |
3098 /* the read_dirty call, e.g. because it's in an unused */ | |
3099 /* thread stack. It's OK to treat it as clean, in */ | |
3100 /* that case. And it's consistent with */ | |
3101 /* GC_page_was_ever_dirty. */ | |
3102 # endif | |
3103 return(result); | |
3104 } | |
3105 | |
3106 GC_bool GC_page_was_ever_dirty(h) | |
3107 struct hblk *h; | |
3108 { | |
3109 register word index = PHT_HASH(h); | |
3110 register GC_bool result; | |
3111 | |
3112 result = get_pht_entry_from_index(GC_written_pages, index); | |
3113 # ifdef GC_SOLARIS_THREADS | |
3114 if (result && PAGE_IS_FRESH(h)) result = FALSE; | |
3115 # endif | |
3116 return(result); | |
3117 } | |
3118 | |
3119 /* Caller holds allocation lock. */ | |
3120 void GC_is_fresh(h, n) | |
3121 struct hblk *h; | |
3122 word n; | |
3123 { | |
3124 | |
3125 register word index; | |
3126 | |
3127 # ifdef GC_SOLARIS_THREADS | |
3128 register word i; | |
3129 | |
3130 if (GC_fresh_pages != 0) { | |
3131 for (i = 0; i < n; i++) { | |
3132 ADD_FRESH_PAGE(h + i); | |
3133 } | |
3134 } | |
3135 # endif | |
3136 } | |
3137 | |
3138 # endif /* PROC_VDB */ | |
3139 | |
3140 | |
3141 # ifdef PCR_VDB | |
3142 | |
3143 # include "vd/PCR_VD.h" | |
3144 | |
3145 # define NPAGES (32*1024) /* 128 MB */ | |
3146 | |
3147 PCR_VD_DB GC_grungy_bits[NPAGES]; | |
3148 | |
3149 ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */ | |
3150 /* HBLKSIZE aligned. */ | |
3151 | |
/* Start PCR virtual-dirty-bit tracking over a fixed NPAGES-block	*/
/* window beginning at the first heap section.				*/
void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
    /* For the time being, we assume the heap generally grows up */
    GC_vd_base = GC_heap_sects[0].hs_start;
    if (GC_vd_base == 0) {
   	ABORT("Bad initial heap segment");
    }
    if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
	!= PCR_ERes_okay) {
	ABORT("dirty bit initialization failed");
    }
}
3165 | |
/* Harvest and clear the PCR dirty bits for the tracked window,		*/
/* first enabling write tracking on any heap sections added since	*/
/* the previous call.							*/
void GC_read_dirty()
{
    /* lazily enable dirty bits on newly added heap sects */
    {
        static int onhs = 0;	/* Number of sections already enabled.	*/
        int nhs = GC_n_heap_sects;
        for( ; onhs < nhs; onhs++ ) {
            PCR_VD_WriteProtectEnable(
                    GC_heap_sects[onhs].hs_start,
                    GC_heap_sects[onhs].hs_bytes );
        }
    }


    if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
        != PCR_ERes_okay) {
	ABORT("dirty bit read failed");
    }
}
3185 | |
3186 GC_bool GC_page_was_dirty(h) | |
3187 struct hblk *h; | |
3188 { | |
3189 if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) { | |
3190 return(TRUE); | |
3191 } | |
3192 return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit); | |
3193 } | |
3194 | |
/*ARGSUSED*/
/* A disable/enable cycle leaves the pages writable just long enough	*/
/* for PCR to record them as dirty, which is the effect we need.	*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
    PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
}
3204 | |
3205 # endif /* PCR_VDB */ | |
3206 | |
# ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
/* Default for dirty-bit implementations that never write-protect the	*/
/* heap: the client need not avoid writing to any part of it.		*/
int GC_incremental_protection_needs()
{
    return GC_PROTECTS_NONE;
}
# endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
3213 | |
3214 /* | |
3215 * Call stack save code for debugging. | |
3216 * Should probably be in mach_dep.c, but that requires reorganization. | |
3217 */ | |
3218 | |
3219 /* I suspect the following works for most X86 *nix variants, so */ | |
3220 /* long as the frame pointer is explicitly stored. In the case of gcc, */ | |
3221 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */ | |
#if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
#  include <features.h>

   /* Saved-frame layout assumed by the stack walker below; valid only	*/
   /* when the compiler pushes %ebp-linked frames (no			*/
   /* -fomit-frame-pointer).						*/
   struct frame {
	struct frame *fr_savfp;
	long	fr_savpc;
        long	fr_arg[NARGS];  /* All the arguments go here.	*/
   };
#endif
3231 | |
#if defined(SPARC)
# if defined(LINUX)
#   include <features.h>

    /* SPARC register-window save-area layout; fr_savfp/fr_savpc link	*/
    /* to the caller's frame.  Other SPARC systems get the layout from	*/
    /* their system headers below.					*/
    struct frame {
	long	fr_local[8];
	long	fr_arg[6];
	struct frame *fr_savfp;
	long	fr_savpc;
#       ifndef __arch64__
	  char	*fr_stret;
#       endif
	long	fr_argd[6];
	long	fr_argx[0];
    };
# else
#   if defined(SUNOS4)
#     include <machine/frame.h>
#   else
#     if defined (DRSNX)
#	include <sys/sparc/frame.h>
#     else
#	if defined(OPENBSD) || defined(NETBSD)
#	  include <frame.h>
#	else
#	  include <sys/frame.h>
#	endif
#     endif
#   endif
# endif
# if NARGS > 6
	--> We only know how to to get the first 6 arguments
# endif
#endif /* SPARC */
3266 | |
3267 #ifdef NEED_CALLINFO | |
3268 /* Fill in the pc and argument information for up to NFRAMES of my */ | |
3269 /* callers. Ignore my frame and my callers frame. */ | |
3270 | |
3271 #ifdef LINUX | |
3272 # include <unistd.h> | |
3273 #endif | |
3274 | |
3275 #endif /* NEED_CALLINFO */ | |
3276 | |
3277 #ifdef SAVE_CALL_CHAIN | |
3278 | |
3279 #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \ | |
3280 && defined(GC_HAVE_BUILTIN_BACKTRACE) | |
3281 | |
3282 #include <execinfo.h> | |
3283 | |
3284 void GC_save_callers (info) | |
3285 struct callinfo info[NFRAMES]; | |
3286 { | |
3287 void * tmp_info[NFRAMES + 1]; | |
3288 int npcs, i; | |
3289 # define IGNORE_FRAMES 1 | |
3290 | |
3291 /* We retrieve NFRAMES+1 pc values, but discard the first, since it */ | |
3292 /* points to our own frame. */ | |
3293 GC_ASSERT(sizeof(struct callinfo) == sizeof(void *)); | |
3294 npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES); | |
3295 BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *)); | |
3296 for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0; | |
3297 } | |
3298 | |
3299 #else /* No builtin backtrace; do it ourselves */ | |
3300 | |
3301 #if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC) | |
3302 # define FR_SAVFP fr_fp | |
3303 # define FR_SAVPC fr_pc | |
3304 #else | |
3305 # define FR_SAVFP fr_savfp | |
3306 # define FR_SAVPC fr_savpc | |
3307 #endif | |
3308 | |
3309 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9)) | |
3310 # define BIAS 2047 | |
3311 #else | |
3312 # define BIAS 0 | |
3313 #endif | |
3314 | |
/* Fill info[] with up to NFRAMES return addresses (and, if NARGS > 0,	*/
/* bit-complemented argument words) by walking the saved-frame-pointer	*/
/* chain.  Stops at the stack bottom or a hotter-than-current frame.	*/
void GC_save_callers (info) 
struct callinfo info[NFRAMES];
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
# ifdef I386
    /* We assume this is turned on only with gcc as the compiler. */
    asm("movl %%ebp,%0" : "=r"(frame));
    fp = frame;
# else
    word GC_save_regs_in_stack();

    /* Flush register windows to the stack so the frame chain is	*/
    /* readable; BIAS compensates for the 64-bit SPARC stack bias.	*/
    frame = (struct frame *) GC_save_regs_in_stack ();
    fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
#endif
  
   for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
	   && (nframes < NFRAMES));
       fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
      register int i;
      
      info[nframes].ci_pc = fp->FR_SAVPC;
#     if NARGS > 0
        for (i = 0; i < NARGS; i++) {
	  info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
        }
#     endif /* NARGS > 0 */
  }
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}
3346 | |
3347 #endif /* No builtin backtrace */ | |
3348 | |
3349 #endif /* SAVE_CALL_CHAIN */ | |
3350 | |
3351 #ifdef NEED_CALLINFO | |
3352 | |
3353 /* Print info to stderr. We do NOT hold the allocation lock */ | |
3354 void GC_print_callers (info) | |
3355 struct callinfo info[NFRAMES]; | |
3356 { | |
3357 register int i; | |
3358 static int reentry_count = 0; | |
3359 GC_bool stop = FALSE; | |
3360 | |
3361 LOCK(); | |
3362 ++reentry_count; | |
3363 UNLOCK(); | |
3364 | |
3365 # if NFRAMES == 1 | |
3366 GC_err_printf0("\tCaller at allocation:\n"); | |
3367 # else | |
3368 GC_err_printf0("\tCall chain at allocation:\n"); | |
3369 # endif | |
3370 for (i = 0; i < NFRAMES && !stop ; i++) { | |
3371 if (info[i].ci_pc == 0) break; | |
3372 # if NARGS > 0 | |
3373 { | |
3374 int j; | |
3375 | |
3376 GC_err_printf0("\t\targs: "); | |
3377 for (j = 0; j < NARGS; j++) { | |
3378 if (j != 0) GC_err_printf0(", "); | |
3379 GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]), | |
3380 ~(info[i].ci_arg[j])); | |
3381 } | |
3382 GC_err_printf0("\n"); | |
3383 } | |
3384 # endif | |
3385 if (reentry_count > 1) { | |
3386 /* We were called during an allocation during */ | |
3387 /* a previous GC_print_callers call; punt. */ | |
3388 GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc); | |
3389 continue; | |
3390 } | |
3391 { | |
3392 # ifdef LINUX | |
3393 FILE *pipe; | |
3394 # endif | |
3395 # if defined(GC_HAVE_BUILTIN_BACKTRACE) | |
3396 char **sym_name = | |
3397 backtrace_symbols((void **)(&(info[i].ci_pc)), 1); | |
3398 char *name = sym_name[0]; | |
3399 # else | |
3400 char buf[40]; | |
3401 char *name = buf; | |
3402 sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc); | |
3403 # endif | |
3404 # if defined(LINUX) && !defined(SMALL_CONFIG) | |
3405 /* Try for a line number. */ | |
3406 { | |
3407 # define EXE_SZ 100 | |
3408 static char exe_name[EXE_SZ]; | |
3409 # define CMD_SZ 200 | |
3410 char cmd_buf[CMD_SZ]; | |
3411 # define RESULT_SZ 200 | |
3412 static char result_buf[RESULT_SZ]; | |
3413 size_t result_len; | |
3414 static GC_bool found_exe_name = FALSE; | |
3415 static GC_bool will_fail = FALSE; | |
3416 int ret_code; | |
3417 /* Try to get it via a hairy and expensive scheme. */ | |
3418 /* First we get the name of the executable: */ | |
3419 if (will_fail) goto out; | |
3420 if (!found_exe_name) { | |
3421 ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ); | |
3422 if (ret_code < 0 || ret_code >= EXE_SZ | |
3423 || exe_name[0] != '/') { | |
3424 will_fail = TRUE; /* Dont try again. */ | |
3425 goto out; | |
3426 } | |
3427 exe_name[ret_code] = '\0'; | |
3428 found_exe_name = TRUE; | |
3429 } | |
3430 /* Then we use popen to start addr2line -e <exe> <addr> */ | |
3431 /* There are faster ways to do this, but hopefully this */ | |
3432 /* isn't time critical. */ | |
3433 sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name, | |
3434 (unsigned long)info[i].ci_pc); | |
3435 pipe = popen(cmd_buf, "r"); | |
3436 if (pipe == NULL | |
3437 || (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe)) | |
3438 == 0) { | |
3439 if (pipe != NULL) pclose(pipe); | |
3440 will_fail = TRUE; | |
3441 goto out; | |
3442 } | |
3443 if (result_buf[result_len - 1] == '\n') --result_len; | |
3444 result_buf[result_len] = 0; | |
3445 if (result_buf[0] == '?' | |
3446 || result_buf[result_len-2] == ':' | |
3447 && result_buf[result_len-1] == '0') { | |
3448 pclose(pipe); | |
3449 goto out; | |
3450 } | |
3451 /* Get rid of embedded newline, if any. Test for "main" */ | |
3452 { | |
3453 char * nl = strchr(result_buf, '\n'); | |
3454 if (nl != NULL && nl < result_buf + result_len) { | |
3455 *nl = ':'; | |
3456 } | |
3457 if (strncmp(result_buf, "main", nl - result_buf) == 0) { | |
3458 stop = TRUE; | |
3459 } | |
3460 } | |
3461 if (result_len < RESULT_SZ - 25) { | |
3462 /* Add in hex address */ | |
3463 sprintf(result_buf + result_len, " [0x%lx]", | |
3464 (unsigned long)info[i].ci_pc); | |
3465 } | |
3466 name = result_buf; | |
3467 pclose(pipe); | |
3468 out: | |
3469 } | |
3470 # endif /* LINUX */ | |
3471 GC_err_printf1("\t\t%s\n", name); | |
3472 # if defined(GC_HAVE_BUILTIN_BACKTRACE) | |
3473 free(sym_name); /* May call GC_free; that's OK */ | |
3474 # endif | |
3475 } | |
3476 } | |
3477 LOCK(); | |
3478 --reentry_count; | |
3479 UNLOCK(); | |
3480 } | |
3481 | |
3482 #endif /* NEED_CALLINFO */ | |
3483 | |
3484 #if defined(LINUX) && defined(__ELF__) && \ | |
3485 (!defined(SMALL_CONFIG) || defined(USE_PROC_FOR_LIBRARIES)) | |
3486 #ifdef GC_USE_LD_WRAP | |
3487 # define READ __real_read | |
3488 #else | |
3489 # define READ read | |
3490 #endif | |
3491 | |
3492 | |
/* Repeatedly perform a read call until the buffer is filled or	*/
/* we encounter EOF.						*/
/* Returns the number of bytes read (possibly short, on EOF), or the	*/
/* negative result of the failing read call.			*/
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    size_t num_read = 0;	/* size_t: the old ssize_t accumulator	*/
    				/* was compared against the unsigned	*/
    				/* count, a signed/unsigned mismatch.	*/
    ssize_t result;

    while (num_read < count) {
	result = READ(fd, buf + num_read, count - num_read);
	if (result < 0) return result;
	if (result == 0) break;		/* EOF */
	num_read += (size_t)result;
    }
    return (ssize_t)num_read;
}
3508 #endif /* LINUX && ... */ | |
3509 | |
3510 | |
3511 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG) | |
3512 | |
/* Dump /proc/self/maps to GC_stderr, to enable looking up names for
   addresses in FIND_LEAK output. */
void GC_print_address_map()
{
    int f;
    int result;
    char maps_temp[32768];
    GC_err_printf0("---------- Begin address map ----------\n");
        f = open("/proc/self/maps", O_RDONLY);
        if (-1 == f) ABORT("Couldn't open /proc/self/maps");
	do {
	    result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
	    if (result < 0) ABORT("Couldn't read /proc/self/maps");
	    /* result == 0 is a normal EOF, reached when the file size	*/
	    /* is an exact multiple of the buffer; the old code		*/
	    /* mistakenly ABORTed in that case.				*/
	    if (result > 0) GC_err_write(maps_temp, result);
	} while (result == sizeof(maps_temp));
	close(f);
    GC_err_printf0("---------- End address map ----------\n");
}
3532 | |
3533 #endif | |
3534 | |
3535 |