Annotation of OpenXM_contrib2/asir2000/gc/os_dep.c, Revision 1.6
1.1 noro 1: /*
1.2 noro 2: * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
1.1 noro 3: * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
1.2 noro 4: * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5: * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
1.1 noro 6: *
7: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9: *
10: * Permission is hereby granted to use or copy this program
11: * for any purpose, provided the above notices are retained on all copies.
12: * Permission to modify the code and to distribute modified code is granted,
13: * provided the above notices are retained, and a notice that the code was
14: * modified is included with the above copyright notice.
15: */
16:
1.4 noro 17: # include "private/gc_priv.h"
1.1 noro 18:
19: # if defined(LINUX) && !defined(POWERPC)
20: # include <linux/version.h>
21: # if (LINUX_VERSION_CODE <= 0x10400)
22: /* Ugly hack to get struct sigcontext_struct definition. Required */
23: /* for some early 1.3.X releases. Will hopefully go away soon. */
24: /* in some later Linux releases, asm/sigcontext.h may have to */
25: /* be included instead. */
26: # define __KERNEL__
27: # include <asm/signal.h>
28: # undef __KERNEL__
29: # else
30: /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31: /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32: /* prototypes, so we have to include the top-level sigcontext.h to */
33: /* make sure the former gets defined to be the latter if appropriate. */
34: # include <features.h>
35: # if 2 <= __GLIBC__
1.2 noro 36: # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
1.1 noro 37: /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38: /* has the right declaration for glibc 2.1. */
39: # include <sigcontext.h>
40: # endif /* 0 == __GLIBC_MINOR__ */
41: # else /* not 2 <= __GLIBC__ */
42: /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43: /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44: # include <asm/sigcontext.h>
45: # endif /* 2 <= __GLIBC__ */
46: # endif
47: # endif
1.4 noro 48: # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
49: && !defined(MSWINCE)
1.1 noro 50: # include <sys/types.h>
51: # if !defined(MSWIN32) && !defined(SUNOS4)
52: # include <unistd.h>
53: # endif
54: # endif
55:
56: # include <stdio.h>
1.4 noro 57: # if defined(MSWINCE)
58: # define SIGSEGV 0 /* value is irrelevant */
59: # else
60: # include <signal.h>
61: # endif
1.1 noro 62:
63: /* Blatantly OS dependent routines, except for those that are related */
1.2 noro 64: /* to dynamic loading. */
1.1 noro 65:
1.6 ! noro 66: # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
1.1 noro 67: # define NEED_FIND_LIMIT
68: # endif
69:
1.6 ! noro 70: # if !defined(STACKBOTTOM) && defined(HEURISTIC2)
1.1 noro 71: # define NEED_FIND_LIMIT
72: # endif
73:
1.3 noro 74: # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
1.1 noro 75: # define NEED_FIND_LIMIT
76: # endif
77:
1.6 ! noro 78: # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
! 79: || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1.1 noro 80: # define NEED_FIND_LIMIT
81: # endif
82:
83: #ifdef NEED_FIND_LIMIT
84: # include <setjmp.h>
85: #endif
86:
1.6 ! noro 87: #if defined(FREEBSD) && defined(I386)
1.1 noro 88: # include <machine/trap.h>
89: #endif
90:
91: #ifdef AMIGA
1.4 noro 92: # define GC_AMIGA_DEF
93: # include "AmigaOS.c"
94: # undef GC_AMIGA_DEF
1.1 noro 95: #endif
96:
1.4 noro 97: #if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 98: # define WIN32_LEAN_AND_MEAN
99: # define NOSERVICE
100: # include <windows.h>
101: #endif
102:
103: #ifdef MACOS
104: # include <Processes.h>
105: #endif
106:
107: #ifdef IRIX5
108: # include <sys/uio.h>
109: # include <malloc.h> /* for locking */
110: #endif
111: #ifdef USE_MMAP
112: # include <sys/types.h>
113: # include <sys/mman.h>
114: # include <sys/stat.h>
1.4 noro 115: #endif
116:
117: #ifdef UNIX_LIKE
1.1 noro 118: # include <fcntl.h>
119: #endif
120:
1.6 ! noro 121: #if defined(SUNOS5SIGS) || defined (HURD) || defined(LINUX)
! 122: # ifdef SUNOS5SIGS
! 123: # include <sys/siginfo.h>
! 124: # endif
1.1 noro 125: # undef setjmp
126: # undef longjmp
127: # define setjmp(env) sigsetjmp(env, 1)
128: # define longjmp(env, val) siglongjmp(env, val)
129: # define jmp_buf sigjmp_buf
130: #endif
131:
132: #ifdef DJGPP
1.4 noro 133: /* Apparently necessary for djgpp 2.01. May cause problems with */
1.1 noro 134: /* other versions. */
135: typedef long unsigned int caddr_t;
136: #endif
137:
138: #ifdef PCR
139: # include "il/PCR_IL.h"
140: # include "th/PCR_ThCtl.h"
141: # include "mm/PCR_MM.h"
142: #endif
143:
144: #if !defined(NO_EXECUTE_PERMISSION)
145: # define OPT_PROT_EXEC PROT_EXEC
146: #else
147: # define OPT_PROT_EXEC 0
148: #endif
149:
1.3 noro 150: #if defined(SEARCH_FOR_DATA_START)
1.1 noro 151: /* The I386 case can be handled without a search. The Alpha case */
152: /* used to be handled differently as well, but the rules changed */
153: /* for recent Linux versions. This seems to be the easiest way to */
154: /* cover all versions. */
1.4 noro 155:
156: # ifdef LINUX
157: # pragma weak __data_start
1.6 ! noro 158: extern int __data_start[];
1.4 noro 159: # pragma weak data_start
1.6 ! noro 160: extern int data_start[];
1.4 noro 161: # endif /* LINUX */
1.6 ! noro 162: extern int _end[];
1.4 noro 163:
1.1 noro 164: ptr_t GC_data_start;
165:
/* NOTE(review): this file is a CVS-annotate listing; each line below is */
/* prefixed with its revision/author/original-line columns. */
/* Initialize GC_data_start (start of the writable data segment) for */
/* SEARCH_FOR_DATA_START builds. On Linux, prefer the weak linker */
/* symbols __data_start / data_start when resolved to nonzero; */
/* otherwise search backward from _end using GC_find_limit. */
166: void GC_init_linux_data_start()
167: {
168: extern ptr_t GC_find_limit();
169:
1.4 noro 170: # ifdef LINUX
171: /* Try the easy approaches first: */
1.6 ! noro 172: if ((ptr_t)__data_start != 0) {
! 173: GC_data_start = (ptr_t)(__data_start);
1.4 noro 174: return;
175: }
1.6 ! noro 176: if ((ptr_t)data_start != 0) {
! 177: GC_data_start = (ptr_t)(data_start);
1.4 noro 178: return;
179: }
180: # endif /* LINUX */
1.6 ! noro 181: GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
1.4 noro 182: }
183: #endif
184:
1.6 ! noro 185: # ifdef ECOS
! 186:
! 187: # ifndef ECOS_GC_MEMORY_SIZE
! 188: # define ECOS_GC_MEMORY_SIZE (448 * 1024)
! 189: # endif /* ECOS_GC_MEMORY_SIZE */
! 190:
! 191: // setjmp() function, as described in ANSI para 7.6.1.1
! 192: #define setjmp( __env__ ) hal_setjmp( __env__ )
! 193:
! 194: // FIXME: This is a simple way of allocating memory which is
! 195: // compatible with ECOS early releases. Later releases use a more
! 196: // sophisticated means of allocating memory than this simple static
! 197: // allocator, but this method is at least bound to work.
! 198: static char memory[ECOS_GC_MEMORY_SIZE];
! 199: static char *brk = memory;
! 200:
/* Minimal bump-pointer sbrk() replacement over the static `memory` */
/* arena (ECOS builds). Returns the old break, or NULL -- with brk */
/* restored -- when the request would overrun the arena. */
/* NOTE(review): a negative increment is never checked against the */
/* arena start; presumably callers only grow -- confirm. */
! 201: static void *tiny_sbrk(ptrdiff_t increment)
! 202: {
! 203: void *p = brk;
! 204:
! 205: brk += increment;
! 206:
! 207: if (brk > memory + sizeof memory)
! 208: {
! 209: brk -= increment;
! 210: return NULL;
! 211: }
! 212:
! 213: return p;
! 214: }
! 215: #define sbrk tiny_sbrk
! 216: # endif /* ECOS */
! 217:
! 218: #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
1.4 noro 219: ptr_t GC_data_start;
220:
/* Initialize GC_data_start on ELF NetBSD/OpenBSD by searching backward */
/* from &environ with GC_find_limit (environ presumably lies in the */
/* data segment -- see the caveat in the original comment below). */
221: void GC_init_netbsd_elf()
222: {
223: extern ptr_t GC_find_limit();
224: extern char **environ;
225: /* This may need to be environ, without the underscore, for */
226: /* some versions. */
227: GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
1.1 noro 228: }
229: #endif
230:
231: # ifdef OS2
232:
233: # include <stddef.h>
234:
235: # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
236:
237: struct exe_hdr {
238: unsigned short magic_number;
239: unsigned short padding[29];
240: long new_exe_offset;
241: };
242:
243: #define E_MAGIC(x) (x).magic_number
244: #define EMAGIC 0x5A4D
245: #define E_LFANEW(x) (x).new_exe_offset
246:
247: struct e32_exe {
248: unsigned char magic_number[2];
249: unsigned char byte_order;
250: unsigned char word_order;
251: unsigned long exe_format_level;
252: unsigned short cpu;
253: unsigned short os;
254: unsigned long padding1[13];
255: unsigned long object_table_offset;
256: unsigned long object_count;
257: unsigned long padding2[31];
258: };
259:
260: #define E32_MAGIC1(x) (x).magic_number[0]
261: #define E32MAGIC1 'L'
262: #define E32_MAGIC2(x) (x).magic_number[1]
263: #define E32MAGIC2 'X'
264: #define E32_BORDER(x) (x).byte_order
265: #define E32LEBO 0
266: #define E32_WORDER(x) (x).word_order
267: #define E32LEWO 0
268: #define E32_CPU(x) (x).cpu
269: #define E32CPU286 1
270: #define E32_OBJTAB(x) (x).object_table_offset
271: #define E32_OBJCNT(x) (x).object_count
272:
273: struct o32_obj {
274: unsigned long size;
275: unsigned long base;
276: unsigned long flags;
277: unsigned long pagemap;
278: unsigned long mapsize;
279: unsigned long reserved;
280: };
281:
282: #define O32_FLAGS(x) (x).flags
283: #define OBJREAD 0x0001L
284: #define OBJWRITE 0x0002L
285: #define OBJINVALID 0x0080L
286: #define O32_SIZE(x) (x).size
287: #define O32_BASE(x) (x).base
288:
289: # else /* IBM's compiler */
290:
291: /* A kludge to get around what appears to be a header file bug */
292: # ifndef WORD
293: # define WORD unsigned short
294: # endif
295: # ifndef DWORD
296: # define DWORD unsigned long
297: # endif
298:
299: # define EXE386 1
300: # include <newexe.h>
301: # include <exe386.h>
302:
303: # endif /* __IBMC__ */
304:
305: # define INCL_DOSEXCEPTIONS
306: # define INCL_DOSPROCESS
307: # define INCL_DOSERRORS
308: # define INCL_DOSMODULEMGR
309: # define INCL_DOSMEMMGR
310: # include <os2.h>
311:
312:
313: /* Disable and enable signals during nontrivial allocations */
314:
/* OS/2: suppress signal delivery around nontrivial allocation using */
/* DosEnterMustComplete; aborts if the must-complete count shows a */
/* nested disable (nest != 1). */
315: void GC_disable_signals(void)
316: {
317: ULONG nest;
318:
319: DosEnterMustComplete(&nest);
320: if (nest != 1) ABORT("nested GC_disable_signals");
321: }
322:
/* OS/2: re-enable signal delivery; aborts if the must-complete count */
/* does not return to zero (unmatched enable). */
323: void GC_enable_signals(void)
324: {
325: ULONG nest;
326:
327: DosExitMustComplete(&nest);
328: if (nest != 0) ABORT("GC_enable_signals");
329: }
330:
331:
332: # else
333:
334: # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
1.4 noro 335: && !defined(MSWINCE) \
1.6 ! noro 336: && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
! 337: && !defined(NOSYS) && !defined(ECOS)
1.1 noro 338:
1.6 ! noro 339: # if defined(sigmask) && !defined(UTS4) && !defined(HURD)
1.1 noro 340: /* Use the traditional BSD interface */
341: # define SIGSET_T int
342: # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
343: # define SIG_FILL(set) (set) = 0x7fffffff
344: /* Setting the leading bit appears to provoke a bug in some */
345: /* longjmp implementations. Most systems appear not to have */
346: /* a signal 32. */
347: # define SIGSETMASK(old, new) (old) = sigsetmask(new)
348: # else
349: /* Use POSIX/SYSV interface */
350: # define SIGSET_T sigset_t
351: # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
352: # define SIG_FILL(set) sigfillset(&set)
353: # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
354: # endif
355:
356: static GC_bool mask_initialized = FALSE;
357:
358: static SIGSET_T new_mask;
359:
360: static SIGSET_T old_mask;
361:
362: static SIGSET_T dummy;
363:
364: #if defined(PRINTSTATS) && !defined(THREADS)
365: # define CHECK_SIGNALS
366: int GC_sig_disabled = 0;
367: #endif
368:
/* Generic Unix: block (almost) all signals, saving the previous mask */
/* in old_mask for GC_enable_signals. The blocking mask is built */
/* lazily on first call: everything except the fault/trap signals */
/* (SIGSEGV, SIGILL, SIGQUIT, and SIGBUS/SIGIOT/SIGEMT/SIGTRAP where */
/* defined), which the GC itself may need to receive. With */
/* CHECK_SIGNALS, nested disables abort. */
369: void GC_disable_signals()
370: {
371: if (!mask_initialized) {
372: SIG_FILL(new_mask);
373:
374: SIG_DEL(new_mask, SIGSEGV);
375: SIG_DEL(new_mask, SIGILL);
376: SIG_DEL(new_mask, SIGQUIT);
377: # ifdef SIGBUS
378: SIG_DEL(new_mask, SIGBUS);
379: # endif
380: # ifdef SIGIOT
381: SIG_DEL(new_mask, SIGIOT);
382: # endif
383: # ifdef SIGEMT
384: SIG_DEL(new_mask, SIGEMT);
385: # endif
386: # ifdef SIGTRAP
387: SIG_DEL(new_mask, SIGTRAP);
388: # endif
389: mask_initialized = TRUE;
390: }
391: # ifdef CHECK_SIGNALS
392: if (GC_sig_disabled != 0) ABORT("Nested disables");
393: GC_sig_disabled++;
394: # endif
395: SIGSETMASK(old_mask,new_mask);
396: }
397:
/* Generic Unix: restore the signal mask saved by GC_disable_signals. */
/* With CHECK_SIGNALS, an enable without a matching disable aborts. */
398: void GC_enable_signals()
399: {
400: # ifdef CHECK_SIGNALS
401: if (GC_sig_disabled != 1) ABORT("Unmatched enable");
402: GC_sig_disabled--;
403: # endif
404: SIGSETMASK(dummy,old_mask);
405: }
406:
407: # endif /* !PCR */
408:
409: # endif /*!OS/2 */
410:
411: /* Ivan Demakov: simplest way (to me) */
1.6 ! noro 412: #if defined (DOS4GW)
/* DOS4GW has no signal masking to do; both operations are no-ops. */
1.1 noro 413: void GC_disable_signals() { }
414: void GC_enable_signals() { }
415: #endif
416:
417: /* Find the page size */
418: word GC_page_size;
419:
1.4 noro 420: # if defined(MSWIN32) || defined(MSWINCE)
/* Win32/WinCE: cache the system info and take the VM page size from */
/* GetSystemInfo's dwPageSize. */
1.1 noro 421: void GC_setpagesize()
422: {
1.4 noro 423: GetSystemInfo(&GC_sysinfo);
424: GC_page_size = GC_sysinfo.dwPageSize;
1.1 noro 425: }
426:
427: # else
428: # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
429: || defined(USE_MUNMAP)
/* Unix variant used when page granularity matters (mprotect/proc */
/* virtual-dirty-bit tracking, mmap, munmap): query the real page size. */
430: void GC_setpagesize()
431: {
432: GC_page_size = GETPAGESIZE();
433: }
434: # else
435: /* It's acceptable to fake it. */
/* Fallback: no code here depends on the true page size, so the GC */
/* heap-block size is an acceptable stand-in. */
436: void GC_setpagesize()
437: {
438: GC_page_size = HBLKSIZE;
439: }
440: # endif
441: # endif
442:
443: /*
444: * Find the base of the stack.
445: * Used only in single-threaded environment.
446: * With threads, GC_mark_roots needs to know how to do this.
447: * Called with allocator lock held.
448: */
1.4 noro 449: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 450: # define is_writable(prot) ((prot) == PAGE_READWRITE \
451: || (prot) == PAGE_WRITECOPY \
452: || (prot) == PAGE_EXECUTE_READWRITE \
453: || (prot) == PAGE_EXECUTE_WRITECOPY)
454: /* Return the number of bytes that are writable starting at p. */
455: /* The pointer p is assumed to be page aligned. */
456: /* If base is not 0, *base becomes the beginning of the */
457: /* allocation region containing p. */
/* Win32: VirtualQuery the page-aligned address p and report how many */
/* bytes of the containing region are committed and writable (0 if the */
/* region is not writable or not MEM_COMMIT). PAGE_GUARD/PAGE_NOCACHE */
/* modifier bits are masked off before the writability test. Aborts on */
/* an unexpected VirtualQuery result size. */
458: word GC_get_writable_length(ptr_t p, ptr_t *base)
459: {
460: MEMORY_BASIC_INFORMATION buf;
461: word result;
462: word protect;
463:
464: result = VirtualQuery(p, &buf, sizeof(buf));
465: if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
466: if (base != 0) *base = (ptr_t)(buf.AllocationBase);
467: protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
468: if (!is_writable(protect)) {
469: return(0);
470: }
471: if (buf.State != MEM_COMMIT) return(0);
472: return(buf.RegionSize);
473: }
474:
/* Win32: stack bottom = end of the writable region containing the */
/* current stack pointer (approximated by a local's address, truncated */
/* to a page boundary). Assumes the stack grows down on this target. */
475: ptr_t GC_get_stack_base()
476: {
477: int dummy;
478: ptr_t sp = (ptr_t)(&dummy);
479: ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
480: word size = GC_get_writable_length(trunc_sp, 0);
481:
482: return(trunc_sp + size);
483: }
484:
485:
1.4 noro 486: # endif /* MS Windows */
487:
488: # ifdef BEOS
489: # include <kernel/OS.h>
/* BeOS: ask the kernel for the current thread's stack_end. */
/* NOTE(review): get_thread_info's return status is not checked. */
490: ptr_t GC_get_stack_base(){
491: thread_info th;
492: get_thread_info(find_thread(NULL),&th);
493: return th.stack_end;
494: }
495: # endif /* BEOS */
496:
1.1 noro 497:
498: # ifdef OS2
499:
/* OS/2: the thread information block records the stack limit directly */
/* (tib_pstacklimit). Aborts if DosGetInfoBlocks fails. */
500: ptr_t GC_get_stack_base()
501: {
502: PTIB ptib;
503: PPIB ppib;
504:
505: if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
506: GC_err_printf0("DosGetInfoBlocks failed\n");
507: ABORT("DosGetInfoBlocks failed\n");
508: }
509: return((ptr_t)(ptib -> tib_pstacklimit));
510: }
511:
1.4 noro 512: # endif /* OS2 */
1.1 noro 513:
514: # ifdef AMIGA
1.4 noro 515: # define GC_AMIGA_SB
516: # include "AmigaOS.c"
517: # undef GC_AMIGA_SB
518: # endif /* AMIGA */
1.1 noro 519:
1.4 noro 520: # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
1.1 noro 521:
522: # ifdef __STDC__
523: typedef void (*handler)(int);
524: # else
525: typedef void (*handler)();
526: # endif
527:
1.6 ! noro 528: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) || defined(HURD)
1.1 noro 529: static struct sigaction old_segv_act;
1.6 ! noro 530: # if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) || defined(HURD)
1.1 noro 531: static struct sigaction old_bus_act;
532: # endif
533: # else
534: static handler old_segv_handler, old_bus_handler;
535: # endif
536:
/* Install h as the handler for SIGSEGV (and SIGBUS where the platform */
/* can deliver it for bad accesses), saving the previous disposition */
/* in old_segv_act/old_bus_act (sigaction platforms) or */
/* old_segv_handler/old_bus_handler (plain signal()) so that */
/* GC_reset_fault_handler can restore it. */
1.4 noro 537: # ifdef __STDC__
538: void GC_set_and_save_fault_handler(handler h)
539: # else
540: void GC_set_and_save_fault_handler(h)
541: handler h;
542: # endif
1.1 noro 543: {
1.6 ! noro 544: # if defined(SUNOS5SIGS) || defined(IRIX5) \
! 545: || defined(OSF1) || defined(HURD)
1.1 noro 546: struct sigaction act;
547:
1.4 noro 548: act.sa_handler = h;
1.6 ! noro 549: # ifdef SUNOS5SIGS
! 550: act.sa_flags = SA_RESTART | SA_NODEFER;
! 551: # else
! 552: act.sa_flags = SA_RESTART;
! 553: # endif
1.1 noro 554: /* The presence of SA_NODEFER represents yet another gross */
555: /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
556: /* interact correctly with -lthread. We hide the confusion */
557: /* by making sure that signal handling doesn't affect the */
558: /* signal mask. */
559:
560: (void) sigemptyset(&act.sa_mask);
1.6 ! noro 561: # ifdef GC_IRIX_THREADS
1.1 noro 562: /* Older versions have a bug related to retrieving */
563: /* and setting a handler at the same time. */
564: (void) sigaction(SIGSEGV, 0, &old_segv_act);
565: (void) sigaction(SIGSEGV, &act, 0);
566: # else
567: (void) sigaction(SIGSEGV, &act, &old_segv_act);
1.2 noro 568: # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
1.6 ! noro 569: || defined(HPUX) || defined(HURD)
1.2 noro 570: /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
571: /* Pthreads doesn't exist under Irix 5.x, so we */
572: /* don't have to worry in the threads case. */
1.1 noro 573: (void) sigaction(SIGBUS, &act, &old_bus_act);
574: # endif
1.6 ! noro 575: # endif /* GC_IRIX_THREADS */
1.1 noro 576: # else
1.4 noro 577: old_segv_handler = signal(SIGSEGV, h);
1.1 noro 578: # ifdef SIGBUS
1.4 noro 579: old_bus_handler = signal(SIGBUS, h);
1.1 noro 580: # endif
581: # endif
582: }
1.4 noro 583: # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
584:
585: # ifdef NEED_FIND_LIMIT
586: /* Some tools to implement HEURISTIC2 */
587: # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
588: /* static */ jmp_buf GC_jmp_buf;
589:
590: /*ARGSUSED*/
/* Temporary fault handler used by the find-limit probes: any SIGSEGV */
/* (or SIGBUS) simply longjmps back to GC_jmp_buf. */
591: void GC_fault_handler(sig)
592: int sig;
593: {
594: longjmp(GC_jmp_buf, 1);
595: }
596:
/* Arm the longjmp-based fault handler above; pair with */
/* GC_reset_fault_handler when the probe is done. */
597: void GC_setup_temporary_fault_handler()
598: {
599: GC_set_and_save_fault_handler(GC_fault_handler);
600: }
1.1 noro 601:
/* Restore the SIGSEGV (and, where applicable, SIGBUS) dispositions */
/* saved by GC_set_and_save_fault_handler. The #if structure mirrors */
/* the installer's exactly. */
602: void GC_reset_fault_handler()
603: {
1.6 ! noro 604: # if defined(SUNOS5SIGS) || defined(IRIX5) \
! 605: || defined(OSF1) || defined(HURD)
1.1 noro 606: (void) sigaction(SIGSEGV, &old_segv_act, 0);
1.2 noro 607: # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
1.6 ! noro 608: || defined(HPUX) || defined(HURD)
1.1 noro 609: (void) sigaction(SIGBUS, &old_bus_act, 0);
610: # endif
611: # else
612: (void) signal(SIGSEGV, old_segv_handler);
613: # ifdef SIGBUS
614: (void) signal(SIGBUS, old_bus_handler);
615: # endif
616: # endif
617: }
618:
619: /* Return the first nonaddressible location > p (up) or */
620: /* the smallest location q s.t. [q,p] is addressible (!up). */
/* Probe memory MIN_PAGE_SIZE bytes at a time, starting from p rounded */
/* down, in the direction given by `up`, until a read faults; the fault */
/* longjmps out of the infinite loop via the temporary handler. */
/* Returns the first nonaddressible location > p (up), or the smallest */
/* q such that [q,p] is addressible (!up). Must run with the */
/* allocation lock held (see the `result` comment below). */
621: ptr_t GC_find_limit(p, up)
622: ptr_t p;
623: GC_bool up;
624: {
625: static VOLATILE ptr_t result;
626: /* Needs to be static, since otherwise it may not be */
627: /* preserved across the longjmp. Can safely be */
628: /* static since it's only called once, with the */
629: /* allocation lock held. */
630:
631:
632: GC_setup_temporary_fault_handler();
633: if (setjmp(GC_jmp_buf) == 0) {
634: result = (ptr_t)(((word)(p))
635: & ~(MIN_PAGE_SIZE-1));
636: for (;;) {
637: if (up) {
638: result += MIN_PAGE_SIZE;
639: } else {
640: result -= MIN_PAGE_SIZE;
641: }
642: GC_noop1((word)(*result));
643: }
644: }
645: GC_reset_fault_handler();
646: if (!up) {
647: result += MIN_PAGE_SIZE;
648: }
649: return(result);
650: }
651: # endif
652:
1.6 ! noro 653: #if defined(ECOS) || defined(NOSYS)
/* ECOS/NOSYS: STACKBOTTOM is a configured constant on these targets. */
! 654: ptr_t GC_get_stack_base()
! 655: {
! 656: return STACKBOTTOM;
! 657: }
! 658: #endif
! 659:
1.2 noro 660: #ifdef LINUX_STACKBOTTOM
661:
1.3 noro 662: #include <sys/types.h>
663: #include <sys/stat.h>
664:
1.2 noro 665: # define STAT_SKIP 27 /* Number of fields preceding startstack */
1.3 noro 666: /* field in /proc/self/stat */
1.2 noro 667:
1.4 noro 668: # pragma weak __libc_stack_end
669: extern ptr_t __libc_stack_end;
670:
671: # ifdef IA64
672: # pragma weak __libc_ia64_register_backing_store_base
673: extern ptr_t __libc_ia64_register_backing_store_base;
674:
/* IA64: locate the base of the register backing store. Prefer glibc's */
/* weak __libc_ia64_register_backing_store_base (checking both that the */
/* symbol resolved and that its value is set -- see comment below); */
/* otherwise derive it from GC_stackbottom displaced and aligned by the */
/* BACKING_STORE_* configuration constants. */
675: ptr_t GC_get_register_stack_base(void)
676: {
1.6 ! noro 677: if (0 != &__libc_ia64_register_backing_store_base
! 678: && 0 != __libc_ia64_register_backing_store_base) {
! 679: /* Glibc 2.2.4 has a bug such that for dynamically linked */
! 680: /* executables __libc_ia64_register_backing_store_base is */
! 681: /* defined but uninitialized during constructor calls. */
! 682: /* Hence we check for both nonzero address and value. */
1.4 noro 683: return __libc_ia64_register_backing_store_base;
684: } else {
685: word result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
686: result += BACKING_STORE_ALIGNMENT - 1;
687: result &= ~(BACKING_STORE_ALIGNMENT - 1);
688: return (ptr_t)result;
689: }
690: }
691: # endif
692:
/* Linux: find the main stack bottom. Fast path: glibc's weak */
/* __libc_stack_end (with an IA64 quirk workaround). Slow path: parse */
/* the 28th field (STAT_SKIP = 27 fields skipped) of /proc/self/stat, */
/* i.e. the kernel's startstack value, using raw read() so no malloc */
/* is triggered (matters under REDIRECT_MALLOC). Sanity-checks the */
/* parsed address against an arbitrary 0x10000000 lower bound. */
1.2 noro 693: ptr_t GC_linux_stack_base(void)
694: {
1.3 noro 695: /* We read the stack base value from /proc/self/stat. We do this */
696: /* using direct I/O system calls in order to avoid calling malloc */
697: /* in case REDIRECT_MALLOC is defined. */
698: # define STAT_BUF_SIZE 4096
1.4 noro 699: # if defined(GC_USE_LD_WRAP)
1.3 noro 700: # define STAT_READ __real_read
701: # else
702: # define STAT_READ read
703: # endif
704: char stat_buf[STAT_BUF_SIZE];
705: int f;
1.2 noro 706: char c;
707: word result = 0;
1.3 noro 708: size_t i, buf_offset = 0;
1.2 noro 709:
1.4 noro 710: /* First try the easy way. This should work for glibc 2.2 */
711: if (0 != &__libc_stack_end) {
1.6 ! noro 712: # ifdef IA64
! 713: /* Some versions of glibc set the address 16 bytes too */
! 714: /* low while the initialization code is running. */
! 715: if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
! 716: return __libc_stack_end + 0x10;
! 717: } /* Otherwise it's not safe to add 16 bytes and we fall */
! 718: /* back to using /proc. */
! 719: # else
! 720: return __libc_stack_end;
! 721: # endif
1.4 noro 722: }
1.3 noro 723: f = open("/proc/self/stat", O_RDONLY);
724: if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
725: ABORT("Couldn't read /proc/self/stat");
726: }
727: c = stat_buf[buf_offset++];
1.2 noro 728: /* Skip the required number of fields. This number is hopefully */
729: /* constant across all Linux implementations. */
730: for (i = 0; i < STAT_SKIP; ++i) {
1.3 noro 731: while (isspace(c)) c = stat_buf[buf_offset++];
732: while (!isspace(c)) c = stat_buf[buf_offset++];
1.2 noro 733: }
1.3 noro 734: while (isspace(c)) c = stat_buf[buf_offset++];
1.2 noro 735: while (isdigit(c)) {
736: result *= 10;
737: result += c - '0';
1.3 noro 738: c = stat_buf[buf_offset++];
1.2 noro 739: }
1.3 noro 740: close(f);
1.2 noro 741: if (result < 0x10000000) ABORT("Absurd stack bottom value");
742: return (ptr_t)result;
743: }
744:
745: #endif /* LINUX_STACKBOTTOM */
1.1 noro 746:
1.4 noro 747: #ifdef FREEBSD_STACKBOTTOM
748:
749: /* This uses an undocumented sysctl call, but at least one expert */
750: /* believes it will stay. */
751:
752: #include <unistd.h>
753: #include <sys/types.h>
754: #include <sys/sysctl.h>
755:
/* FreeBSD: obtain the user stack top via the (undocumented but */
/* stable -- see comment above) sysctl CTL_KERN.KERN_USRSTACK. */
/* Aborts if the sysctl call fails. */
756: ptr_t GC_freebsd_stack_base(void)
757: {
1.6 ! noro 758: int nm[2] = {CTL_KERN, KERN_USRSTACK};
! 759: ptr_t base;
! 760: size_t len = sizeof(ptr_t);
! 761: int r = sysctl(nm, 2, &base, &len, NULL, 0);
1.4 noro 762:
763: if (r) ABORT("Error getting stack base");
764:
1.6 ! noro 765: return base;
1.4 noro 766: }
767:
768: #endif /* FREEBSD_STACKBOTTOM */
769:
770: #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
771: && !defined(MSWINCE) && !defined(OS2)
772:
/* Generic Unix stack-bottom detection, in order of preference as */
/* configured: a hard STACKBOTTOM constant; HEURISTIC1 (round a local's */
/* address to a STACK_GRAN boundary); the Linux or FreeBSD helpers */
/* above; or HEURISTIC2 (probe with GC_find_limit in the growth */
/* direction, optionally clamped to HEURISTIC2_LIMIT). */
1.1 noro 773: ptr_t GC_get_stack_base()
774: {
775: word dummy;
776: ptr_t result;
777:
778: # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
779:
780: # ifdef STACKBOTTOM
781: return(STACKBOTTOM);
782: # else
783: # ifdef HEURISTIC1
784: # ifdef STACK_GROWS_DOWN
785: result = (ptr_t)((((word)(&dummy))
786: + STACKBOTTOM_ALIGNMENT_M1)
787: & ~STACKBOTTOM_ALIGNMENT_M1);
788: # else
789: result = (ptr_t)(((word)(&dummy))
790: & ~STACKBOTTOM_ALIGNMENT_M1);
791: # endif
792: # endif /* HEURISTIC1 */
1.2 noro 793: # ifdef LINUX_STACKBOTTOM
794: result = GC_linux_stack_base();
795: # endif
1.4 noro 796: # ifdef FREEBSD_STACKBOTTOM
797: result = GC_freebsd_stack_base();
798: # endif
1.1 noro 799: # ifdef HEURISTIC2
800: # ifdef STACK_GROWS_DOWN
801: result = GC_find_limit((ptr_t)(&dummy), TRUE);
802: # ifdef HEURISTIC2_LIMIT
803: if (result > HEURISTIC2_LIMIT
804: && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
805: result = HEURISTIC2_LIMIT;
806: }
807: # endif
808: # else
809: result = GC_find_limit((ptr_t)(&dummy), FALSE);
810: # ifdef HEURISTIC2_LIMIT
811: if (result < HEURISTIC2_LIMIT
812: && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
813: result = HEURISTIC2_LIMIT;
814: }
815: # endif
816: # endif
817:
818: # endif /* HEURISTIC2 */
819: # ifdef STACK_GROWS_DOWN
820: if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
821: # endif
822: return(result);
823: # endif /* STACKBOTTOM */
824: }
825:
1.4 noro 826: # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS */
1.1 noro 827:
828: /*
829: * Register static data segment(s) as roots.
830: * If more data segments are added later then they need to be registered
831: * add that point (as we do with SunOS dynamic loading),
832: * or GC_mark_roots needs to check for them (as we do with PCR).
833: * Called with allocator lock held.
834: */
835:
836: # ifdef OS2
837:
/* OS/2: register the executable's static data as GC roots. Opens the */
/* running module's file, validates the MZ (DOS) and LX (OS/2 32-bit) */
/* headers, then walks the LX object table registering every readable, */
/* writable, valid object via GC_add_roots_inner. Any I/O or format */
/* problem aborts (or exits, for 80286 images). */
838: void GC_register_data_segments()
839: {
840: PTIB ptib;
841: PPIB ppib;
842: HMODULE module_handle;
843: # define PBUFSIZ 512
844: UCHAR path[PBUFSIZ];
845: FILE * myexefile;
846: struct exe_hdr hdrdos; /* MSDOS header. */
847: struct e32_exe hdr386; /* Real header for my executable */
848: struct o32_obj seg; /* Current segment */
849: int nsegs;
850:
851:
852: if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
853: GC_err_printf0("DosGetInfoBlocks failed\n");
854: ABORT("DosGetInfoBlocks failed\n");
855: }
856: module_handle = ppib -> pib_hmte;
857: if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
858: GC_err_printf0("DosQueryModuleName failed\n");
/* NOTE(review): the ABORT message below says DosGetInfoBlocks but */
/* this is the DosQueryModuleName failure path. */
859: ABORT("DosGetInfoBlocks failed\n");
860: }
861: myexefile = fopen(path, "rb");
862: if (myexefile == 0) {
863: GC_err_puts("Couldn't open executable ");
864: GC_err_puts(path); GC_err_puts("\n");
865: ABORT("Failed to open executable\n");
866: }
867: if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
868: GC_err_puts("Couldn't read MSDOS header from ");
869: GC_err_puts(path); GC_err_puts("\n");
870: ABORT("Couldn't read MSDOS header");
871: }
872: if (E_MAGIC(hdrdos) != EMAGIC) {
873: GC_err_puts("Executable has wrong DOS magic number: ");
874: GC_err_puts(path); GC_err_puts("\n");
875: ABORT("Bad DOS magic number");
876: }
877: if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
878: GC_err_puts("Seek to new header failed in ");
879: GC_err_puts(path); GC_err_puts("\n");
/* NOTE(review): reused "Bad DOS magic number" message on a seek */
/* failure path. */
880: ABORT("Bad DOS magic number");
881: }
882: if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
883: GC_err_puts("Couldn't read MSDOS header from ");
884: GC_err_puts(path); GC_err_puts("\n");
885: ABORT("Couldn't read OS/2 header");
886: }
887: if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
888: GC_err_puts("Executable has wrong OS/2 magic number:");
889: GC_err_puts(path); GC_err_puts("\n");
890: ABORT("Bad OS/2 magic number");
891: }
892: if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
893: GC_err_puts("Executable %s has wrong byte order: ");
894: GC_err_puts(path); GC_err_puts("\n");
895: ABORT("Bad byte order");
896: }
897: if ( E32_CPU(hdr386) == E32CPU286) {
898: GC_err_puts("GC can't handle 80286 executables: ");
899: GC_err_puts(path); GC_err_puts("\n");
900: EXIT();
901: }
902: if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
903: SEEK_SET) != 0) {
904: GC_err_puts("Seek to object table failed: ");
905: GC_err_puts(path); GC_err_puts("\n");
906: ABORT("Seek to object table failed");
907: }
908: for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
909: int flags;
910: if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
911: GC_err_puts("Couldn't read obj table entry from ");
912: GC_err_puts(path); GC_err_puts("\n");
913: ABORT("Couldn't read obj table entry");
914: }
915: flags = O32_FLAGS(seg);
916: if (!(flags & OBJWRITE)) continue;
917: if (!(flags & OBJREAD)) continue;
918: if (flags & OBJINVALID) {
919: GC_err_printf0("Object with invalid pages?\n");
920: continue;
921: }
922: GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
923: }
924: }
925:
1.4 noro 926: # else /* !OS2 */
927:
928: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 929:
930: # ifdef MSWIN32
931: /* Unfortunately, we have to handle win32s very differently from NT, */
932: /* Since VirtualQuery has very different semantics. In particular, */
933: /* under win32s a VirtualQuery call on an unmapped page returns an */
1.6 ! noro 934: /* invalid result. Under NT, GC_register_data_segments is a noop and */
1.1 noro 935: /* all real work is done by GC_register_dynamic_libraries. Under */
936: /* win32s, we cannot find the data segments associated with dll's. */
937: /* We register the main data segment here. */
1.6 ! noro 938: # ifdef __GCC__
! 939: GC_bool GC_no_win32_dlls = TRUE; /* GCC can't do SEH, so we can't use VirtualQuery */
! 940: # else
! 941: GC_bool GC_no_win32_dlls = FALSE;
! 942: # endif
1.1 noro 943:
/* Detect win32s: GetVersion's high bit set with a major version <= 3 */
/* means win32s, where DLL data segments cannot be found and */
/* GC_no_win32_dlls must be set (ORed so a GCC build stays TRUE). */
944: void GC_init_win32()
945: {
1.6 ! noro 946: /* if we're running under win32s, assume that no DLLs will be loaded */
! 947: DWORD v = GetVersion();
! 948: GC_no_win32_dlls |= ((v & 0x80000000) && (v & 0xff) <= 3);
1.1 noro 949: }
1.4 noro 950:
1.1 noro 951: /* Return the smallest address a such that VirtualQuery */
952: /* returns correct results for all addresses between a and start. */
953: /* Assumes VirtualQuery returns correct information for start. */
/* Walk backward from start, one allocation region at a time, while */
/* VirtualQuery keeps returning valid regions; stop at address */
/* underflow, the minimum application address, or the first */
/* unqueryable page. Returns the lowest address for which */
/* VirtualQuery results are known to be reliable. */
954: ptr_t GC_least_described_address(ptr_t start)
955: {
956: MEMORY_BASIC_INFORMATION buf;
957: DWORD result;
958: LPVOID limit;
959: ptr_t p;
960: LPVOID q;
961:
1.4 noro 962: limit = GC_sysinfo.lpMinimumApplicationAddress;
1.1 noro 963: p = (ptr_t)((word)start & ~(GC_page_size - 1));
964: for (;;) {
965: q = (LPVOID)(p - GC_page_size);
966: if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
967: result = VirtualQuery(q, &buf, sizeof(buf));
968: if (result != sizeof(buf) || buf.AllocationBase == 0) break;
969: p = (ptr_t)(buf.AllocationBase);
970: }
971: return(p);
972: }
1.4 noro 973: # endif
1.1 noro 974:
975: /* Is p the start of either the malloc heap, or of one of our */
976: /* heap sections? */
/* Does p start either the CRT malloc heap or one of the GC's own heap */
/* sections? The malloc heap base is discovered once by VirtualQuery- */
/* ing a 1-byte malloc'ed block and cached in a function-local static */
/* (note the result is assumed stable across the process lifetime). */
977: GC_bool GC_is_heap_base (ptr_t p)
978: {
979:
980: register unsigned i;
981:
982: # ifndef REDIRECT_MALLOC
983: static ptr_t malloc_heap_pointer = 0;
984:
985: if (0 == malloc_heap_pointer) {
986: MEMORY_BASIC_INFORMATION buf;
1.4 noro 987: void *pTemp = malloc( 1 );
988: register DWORD result = VirtualQuery(pTemp, &buf, sizeof(buf));
989:
990: free( pTemp );
991:
1.1 noro 992:
993: if (result != sizeof(buf)) {
994: ABORT("Weird VirtualQuery result");
995: }
996: malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
997: }
998: if (p == malloc_heap_pointer) return(TRUE);
999: # endif
1000: for (i = 0; i < GC_n_heap_bases; i++) {
1001: if (GC_heap_bases[i] == p) return(TRUE);
1002: }
1003: return(FALSE);
1004: }
1.4 noro 1005:
1006: # ifdef MSWIN32
1.1 noro 1007: void GC_register_root_section(ptr_t static_root)
1008: {
1009: MEMORY_BASIC_INFORMATION buf;
1010: DWORD result;
1011: DWORD protect;
1012: LPVOID p;
1013: char * base;
1014: char * limit, * new_limit;
1015:
1.6 ! noro 1016: if (!GC_no_win32_dlls) return;
1.1 noro 1017: p = base = limit = GC_least_described_address(static_root);
1.4 noro 1018: while (p < GC_sysinfo.lpMaximumApplicationAddress) {
1.1 noro 1019: result = VirtualQuery(p, &buf, sizeof(buf));
1020: if (result != sizeof(buf) || buf.AllocationBase == 0
1021: || GC_is_heap_base(buf.AllocationBase)) break;
1022: new_limit = (char *)p + buf.RegionSize;
1023: protect = buf.Protect;
1024: if (buf.State == MEM_COMMIT
1025: && is_writable(protect)) {
1026: if ((char *)p == limit) {
1027: limit = new_limit;
1028: } else {
1029: if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1030: base = p;
1031: limit = new_limit;
1032: }
1033: }
1034: if (p > (LPVOID)new_limit /* overflow */) break;
1035: p = (LPVOID)new_limit;
1036: }
1037: if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1038: }
1.4 noro 1039: #endif
1.1 noro 1040:
/* Win32 variant: register the root section containing a static */
/* variable in this module; a local static's address serves as the */
/* probe into the executable's data segment. */
1041: void GC_register_data_segments()
1042: {
1.4 noro 1043: # ifdef MSWIN32
1.1 noro 1044: static char dummy;
1045: GC_register_root_section((ptr_t)(&dummy));
1046: # endif
1047: }
1048:
1.4 noro 1049: # else /* !OS2 && !Windows */
1.1 noro 1050:
1.4 noro 1051: # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1052: || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
/* SVR4-style guess at the start of the writable data segment. */
/* Plan A: assume data starts at the same page offset as etext, on */
/* the next max_page_size-aligned page, and probe it with a write */
/* under a temporary fault handler. Plan B (if the probe faults): */
/* search backward from DATAEND with GC_find_limit. */
1.1 noro 1053: char * GC_SysVGetDataStart(max_page_size, etext_addr)
1054: int max_page_size;
1055: int * etext_addr;
1056: {
1057: word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1058: & ~(sizeof(word) - 1);
1059: /* etext rounded to word boundary */
1060: word next_page = ((text_end + (word)max_page_size - 1)
1061: & ~((word)max_page_size - 1));
1062: word page_offset = (text_end & ((word)max_page_size - 1));
1063: VOLATILE char * result = (char *)(next_page + page_offset);
1064: /* Note that this isn't equivalent to just adding */
1065: /* max_page_size to &etext if &etext is at a page boundary */
1066:
1067: GC_setup_temporary_fault_handler();
1068: if (setjmp(GC_jmp_buf) == 0) {
1069: /* Try writing to the address. */
1070: *result = *result;
1071: GC_reset_fault_handler();
1072: } else {
1073: GC_reset_fault_handler();
1074: /* We got here via a longjmp. The address is not readable. */
1075: /* This is known to happen under Solaris 2.4 + gcc, which place */
1076: /* string constants in the text segment, but after etext. */
1077: /* Use plan B. Note that we now know there is a gap between */
1078: /* text and data segments, so plan A bought us something. */
1079: result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
1080: }
1081: return((char *)result);
1082: }
1083: # endif
1084:
1085:
1.4 noro 1086: #ifdef AMIGA
1087:
1088: # define GC_AMIGA_DS
1089: # include "AmigaOS.c"
1090: # undef GC_AMIGA_DS
1091:
1092: #else /* !OS2 && !Windows && !AMIGA */
1093:
/* Generic (non-OS2, non-Windows, non-AMIGA) variant: register the */
/* static data segment(s) as GC roots. The exact bounds come from */
/* platform-specific sources: DATASTART/DATAEND macros, sbrk(0) for */
/* Solaris threads, get_end() on NeXT/MacOS X, or Mac toolbox calls */
/* under classic MacOS. */
1.1 noro 1094: void GC_register_data_segments()
1095: {
1096: # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1097: && !defined(MACOSX)
1.6 ! noro 1098: # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
1.1 noro 1099: /* As of Solaris 2.3, the Solaris threads implementation */
1100: /* allocates the data structure for the initial thread with */
1101: /* sbrk at process startup. It needs to be scanned, so that */
1102: /* we don't lose some malloc allocated data structures */
1103: /* hanging from it. We're on thin ice here ... */
1104: extern caddr_t sbrk();
1105:
1106: GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1107: # else
1108: GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
/* Some platforms have a second, disjoint data segment. */
1.6 ! noro 1109: # if defined(DATASTART2)
! 1110: GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
! 1111: # endif
1.1 noro 1112: # endif
1113: # endif
1114: # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1115: GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
1116: # endif
1117: # if defined(MACOS)
1118: {
1119: # if defined(THINK_C)
1120: extern void* GC_MacGetDataStart(void);
1121: /* globals begin above stack and end at a5. */
1122: GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1123: (ptr_t)LMGetCurrentA5(), FALSE);
1124: # else
1125: # if defined(__MWERKS__)
1126: # if !__POWERPC__
1127: extern void* GC_MacGetDataStart(void);
1128: /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1129: # if __option(far_data)
1130: extern void* GC_MacGetDataEnd(void);
1131: # endif
1132: /* globals begin above stack and end at a5. */
1133: GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1134: (ptr_t)LMGetCurrentA5(), FALSE);
1135: /* MATTHEW: Handle Far Globals */
1136: # if __option(far_data)
1137: /* Far globals follow the QD globals: */
1138: GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1139: (ptr_t)GC_MacGetDataEnd(), FALSE);
1140: # endif
1141: # else
1142: extern char __data_start__[], __data_end__[];
1143: GC_add_roots_inner((ptr_t)&__data_start__,
1144: (ptr_t)&__data_end__, FALSE);
1145: # endif /* __POWERPC__ */
1146: # endif /* __MWERKS__ */
1147: # endif /* !THINK_C */
1148: }
1149: # endif /* MACOS */
1150:
1151: /* Dynamic libraries are added at every collection, since they may */
1152: /* change. */
1153: }
1154:
1155: # endif /* ! AMIGA */
1.4 noro 1156: # endif /* ! MSWIN32 && ! MSWINCE*/
1.1 noro 1157: # endif /* ! OS2 */
1158:
1159: /*
1160: * Auxiliary routines for obtaining memory from OS.
1161: */
1.4 noro 1162:
1.1 noro 1163: # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1.4 noro 1164: && !defined(MSWIN32) && !defined(MSWINCE) \
1165: && !defined(MACOS) && !defined(DOS4GW)
1.1 noro 1166:
1167: # ifdef SUNOS4
1168: extern caddr_t sbrk();
1169: # endif
1170: # ifdef __STDC__
1171: # define SBRK_ARG_T ptrdiff_t
1172: # else
1173: # define SBRK_ARG_T int
1174: # endif
1175:
1.4 noro 1176:
1.1 noro 1177: # ifdef RS6000
1178: /* The compiler seems to generate speculative reads one past the end of */
1179: /* an allocated object. Hence we need to make sure that the page */
1180: /* following the last heap page is also mapped. */
/* RS6000 sbrk-based allocator. Because the compiler may issue */
/* speculative reads one past the end of an object, each allocation */
/* reserves one extra page beyond the request; my_brk_val remembers */
/* where our last allocation ended so the spare page can be reused */
/* when allocations are contiguous. Returns 0 on failure. */
ptr_t GC_unix_get_mem(bytes)
1182: word bytes;
1183: {
1184: caddr_t cur_brk = (caddr_t)sbrk(0);
1185: caddr_t result;
1186: SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1187: static caddr_t my_brk_val = 0;
1188:
1189: if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
/* Advance the break to the next page boundary if needed. */
1190: if (lsbs != 0) {
1191: if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1192: }
1193: if (cur_brk == my_brk_val) {
1194: /* Use the extra block we allocated last time. */
1195: result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1196: if (result == (caddr_t)(-1)) return(0);
1197: result -= GC_page_size;
1198: } else {
/* Fresh region: request the extra guard page too. */
1199: result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1200: if (result == (caddr_t)(-1)) return(0);
1201: }
1202: my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1203: return((ptr_t)result);
1204: }
1205:
1206: #else /* Not RS6000 */
1207:
1208: #if defined(USE_MMAP)
1.4 noro 1209: /* Tested only under Linux, IRIX5 and Solaris 2 */
1.1 noro 1210:
1211: #ifdef USE_MMAP_FIXED
1212: # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1213: /* Seems to yield better performance on Solaris 2, but can */
1214: /* be unreliable if something is already mapped at the address. */
1215: #else
1216: # define GC_MMAP_FLAGS MAP_PRIVATE
1217: #endif
1218:
1.4 noro 1219: #ifndef HEAP_START
1220: # define HEAP_START 0
1221: #endif
1222:
/* mmap-based allocator (Linux, IRIX5, Solaris 2). Maps bytes */
/* (must be page-aligned) from /dev/zero, hinting at last_addr so */
/* successive heap sections tend to be contiguous. Returns 0 on */
/* mmap failure. */
1.1 noro 1223: ptr_t GC_unix_get_mem(bytes)
1224: word bytes;
1225: {
1226: static GC_bool initialized = FALSE;
1227: static int fd;
1228: void *result;
1229: static ptr_t last_addr = HEAP_START;
1230:
1231: if (!initialized) {
/* NOTE(review): open() result is not checked; on failure fd */
/* would be -1 and every mmap below would fail — confirm this */
/* is acceptable (callers treat 0 as out-of-memory). */
1232: fd = open("/dev/zero", O_RDONLY);
1233: initialized = TRUE;
1234: }
1235: if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1236: result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1237: GC_MMAP_FLAGS, fd, 0/* offset */);
1238: if (result == MAP_FAILED) return(0);
/* Round the next hint up to a page boundary past this mapping. */
1239: last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1240: last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1.4 noro 1241: # if !defined(LINUX)
1242: if (last_addr == 0) {
1243: /* Oops. We got the end of the address space. This isn't */
1244: /* usable by arbitrary C code, since one-past-end pointers */
1245: /* don't work, so we discard it and try again. */
1246: munmap(result, (size_t)(-GC_page_size) - (size_t)result);
1247: /* Leave last page mapped, so we can't repeat. */
1248: return GC_unix_get_mem(bytes);
1249: }
1250: # else
1251: GC_ASSERT(last_addr != 0);
1252: # endif
1.1 noro 1253: return((ptr_t)result);
1254: }
1255:
1256: #else /* Not RS6000, not USE_MMAP */
/* Plain sbrk-based allocator. Page-aligns the break, then extends */
/* it by bytes. On IRIX5 the malloc lock is held across the sbrk */
/* calls since bare sbrk is not thread safe there. Returns 0 on */
/* failure. */
ptr_t GC_unix_get_mem(bytes)
1258: word bytes;
1259: {
1260: ptr_t result;
1261: # ifdef IRIX5
1262: /* Bare sbrk isn't thread safe. Play by malloc rules. */
1263: /* The equivalent may be needed on other systems as well. */
1264: __LOCK_MALLOC();
1265: # endif
1266: {
1267: ptr_t cur_brk = (ptr_t)sbrk(0);
1268: SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1269:
1270: if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1271: if (lsbs != 0) {
1272: if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1273: }
1274: result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1275: if (result == (ptr_t)(-1)) result = 0;
1276: }
1277: # ifdef IRIX5
1278: __UNLOCK_MALLOC();
1279: # endif
1280: return(result);
1281: }
1282:
1283: #endif /* Not USE_MMAP */
1284: #endif /* Not RS6000 */
1285:
1286: # endif /* UN*X */
1287:
1288: # ifdef OS2
1289:
/* OS/2 allocator: DosAllocMem with read/write/execute, committed. */
/* Returns 0 on failure; retries recursively if the API hands back */
/* a null address, since 0 is our failure sentinel. */
void * os2_alloc(size_t bytes)
1291: {
1292: void * result;
1293:
1294: if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1295: PAG_WRITE | PAG_COMMIT)
1296: != NO_ERROR) {
1297: return(0);
1298: }
1299: if (result == 0) return(os2_alloc(bytes));
1300: return(result);
1301: }
1302:
1303: # endif /* OS2 */
1304:
1305:
1.4 noro 1306: # if defined(MSWIN32) || defined(MSWINCE)
1307: SYSTEM_INFO GC_sysinfo;
1308: # endif
1309:
1.6 ! noro 1310: # ifdef MSWIN32
! 1311:
! 1312: # ifdef USE_GLOBAL_ALLOC
! 1313: # define GLOBAL_ALLOC_TEST 1
! 1314: # else
! 1315: # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
! 1316: # endif
1.4 noro 1317:
1.1 noro 1318: word GC_n_heap_bases = 0;
1319:
/* Win32 heap-section allocator. Uses GlobalAlloc (with manual */
/* HBLKSIZE alignment) when GLOBAL_ALLOC_TEST says VirtualAlloc is */
/* problematic, otherwise VirtualAlloc with one extra byte so that */
/* separately-allocated regions never merge. The result is recorded */
/* in GC_heap_bases so GC_is_heap_base/GC_win32_free_heap can find it. */
ptr_t GC_win32_get_mem(bytes)
1321: word bytes;
1322: {
1323: ptr_t result;
1.4 noro 1324:
1.6 ! noro 1325: if (GLOBAL_ALLOC_TEST) {
1.1 noro 1326: /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1327: /* There are also unconfirmed rumors of other */
1328: /* problems, so we dodge the issue. */
1329: result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1330: result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1331: } else {
1.6 ! noro 1332: /* VirtualProtect only works on regions returned by a */
! 1333: /* single VirtualAlloc call. Thus we allocate one */
! 1334: /* extra page, which will prevent merging of blocks */
! 1335: /* in separate regions, and eliminate any temptation */
! 1336: /* to call VirtualProtect on a range spanning regions. */
! 1337: /* This wastes a small amount of memory, and risks */
! 1338: /* increased fragmentation. But better alternatives */
! 1339: /* would require effort. */
! 1340: result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
1.1 noro 1341: MEM_COMMIT | MEM_RESERVE,
1342: PAGE_EXECUTE_READWRITE);
1343: }
1344: if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1345: /* If I read the documentation correctly, this can */
1346: /* only happen if HBLKSIZE > 64k or not a power of 2. */
1347: if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1348: GC_heap_bases[GC_n_heap_bases++] = result;
1349: return(result);
1350: }
1351:
/* Release all heap sections. Only meaningful when they came from */
/* GlobalAlloc (the GC_no_win32_dlls path in GC_win32_get_mem); */
/* VirtualAlloc'ed sections are not freed here. */
void GC_win32_free_heap ()
1353: {
1.6 ! noro 1354: if (GC_no_win32_dlls) {
1.1 noro 1355: while (GC_n_heap_bases > 0) {
1356: GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1357: GC_heap_bases[GC_n_heap_bases] = 0;
1358: }
1359: }
1360: }
1.4 noro 1361: # endif
1362:
1363: #ifdef AMIGA
1364: # define GC_AMIGA_AM
1365: # include "AmigaOS.c"
1366: # undef GC_AMIGA_AM
1367: #endif
1.1 noro 1368:
1369:
1.4 noro 1370: # ifdef MSWINCE
1371: word GC_n_heap_bases = 0;
1372:
/* WinCE allocator: memory is reserved in allocation-granularity */
/* chunks and committed page by page. First tries to commit from */
/* the uncommitted tail of an existing reserved section; otherwise */
/* reserves a new section (MEM_TOP_DOWN) and records it in */
/* GC_heap_bases/GC_heap_lengths. Returns NULL if the commit fails. */
ptr_t GC_wince_get_mem(bytes)
1374: word bytes;
1375: {
1376: ptr_t result;
1377: word i;
1378:
1379: /* Round up allocation size to multiple of page size */
1380: bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
1381:
1382: /* Try to find reserved, uncommitted pages */
1383: for (i = 0; i < GC_n_heap_bases; i++) {
/* Remaining room in section i's current granularity chunk. */
1384: if (((word)(-(signed_word)GC_heap_lengths[i])
1385: & (GC_sysinfo.dwAllocationGranularity-1))
1386: >= bytes) {
1387: result = GC_heap_bases[i] + GC_heap_lengths[i];
1388: break;
1389: }
1390: }
1391:
1392: if (i == GC_n_heap_bases) {
1393: /* Reserve more pages */
1394: word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
1395: & ~(GC_sysinfo.dwAllocationGranularity-1);
1.6 ! noro 1396: /* If we ever support MPROTECT_VDB here, we will probably need to */
! 1397: /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
! 1398: /* never spans regions. It seems to be OK for a VirtualFree argument */
! 1399: /* to span regions, so we should be OK for now. */
1.4 noro 1400: result = (ptr_t) VirtualAlloc(NULL, res_bytes,
1401: MEM_RESERVE | MEM_TOP_DOWN,
1402: PAGE_EXECUTE_READWRITE);
1403: if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1404: /* If I read the documentation correctly, this can */
1405: /* only happen if HBLKSIZE > 64k or not a power of 2. */
1406: if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1407: GC_heap_bases[GC_n_heap_bases] = result;
1408: GC_heap_lengths[GC_n_heap_bases] = 0;
1409: GC_n_heap_bases++;
1410: }
1411:
1412: /* Commit pages */
1413: result = (ptr_t) VirtualAlloc(result, bytes,
1414: MEM_COMMIT,
1415: PAGE_EXECUTE_READWRITE);
1416: if (result != NULL) {
1417: if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1418: GC_heap_lengths[i] += bytes;
1419: }
1420:
1421: return(result);
1422: }
1.1 noro 1423: # endif
1424:
1425: #ifdef USE_MUNMAP
1426:
1.4 noro 1427: /* For now, this only works on Win32/WinCE and some Unix-like */
1428: /* systems. If you have something else, don't define */
1429: /* USE_MUNMAP. */
1.1 noro 1430: /* We assume ANSI C to support this feature. */
1.4 noro 1431:
1432: #if !defined(MSWIN32) && !defined(MSWINCE)
1433:
1.1 noro 1434: #include <unistd.h>
1435: #include <sys/mman.h>
1436: #include <sys/stat.h>
1437: #include <sys/types.h>
1.4 noro 1438:
1439: #endif
1.1 noro 1440:
1441: /* Compute a page aligned starting address for the unmap */
1442: /* operation on a block of size bytes starting at start. */
1443: /* Return 0 if the block is too small to make this feasible. */
/* Compute a page aligned starting address for the unmap */
/* operation on a block of size bytes starting at start. */
/* Return 0 if the block is too small to make this feasible */
/* (i.e. it does not contain at least one whole page). */
ptr_t GC_unmap_start(ptr_t start, word bytes)
1445: {
1446: ptr_t result = start;
1447: /* Round start to next page boundary. */
1448: result += GC_page_size - 1;
1449: result = (ptr_t)((word)result & ~(GC_page_size - 1));
1450: if (result + GC_page_size > start + bytes) return 0;
1451: return result;
1452: }
1453:
1454: /* Compute end address for an unmap operation on the indicated */
1455: /* block. */
/* Compute the (page aligned, rounded down) end address for an */
/* unmap operation on the indicated block. */
ptr_t GC_unmap_end(ptr_t start, word bytes)
1457: {
1458: ptr_t end_addr = start + bytes;
1459: end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1460: return end_addr;
1461: }
1462:
1.4 noro 1463: /* Under Win32/WinCE we commit (map) and decommit (unmap) */
1464: /* memory using VirtualAlloc and VirtualFree. These functions */
1465: /* work on individual allocations of virtual memory, made */
1466: /* previously using VirtualAlloc with the MEM_RESERVE flag. */
1467: /* The ranges we need to (de)commit may span several of these */
1468: /* allocations; therefore we use VirtualQuery to check */
1469: /* allocation lengths, and split up the range as necessary. */
1470:
1.1 noro 1471: /* We assume that GC_remap is called on exactly the same range */
1472: /* as a previous call to GC_unmap. It is safe to consistently */
1473: /* round the endpoints in both places. */
/* Return the page-aligned interior of [start, start+bytes) to the */
/* OS: MEM_DECOMMIT per VirtualAlloc region on Win32/WinCE (a */
/* decommit may not span regions, hence the VirtualQuery loop), */
/* munmap elsewhere. GC_remap must later be called on exactly the */
/* same range. GC_unmapped_bytes tracks the decommitted total. */
void GC_unmap(ptr_t start, word bytes)
1475: {
1476: ptr_t start_addr = GC_unmap_start(start, bytes);
1477: ptr_t end_addr = GC_unmap_end(start, bytes);
1478: word len = end_addr - start_addr;
1479: if (0 == start_addr) return;
1.4 noro 1480: # if defined(MSWIN32) || defined(MSWINCE)
1481: while (len != 0) {
1482: MEMORY_BASIC_INFORMATION mem_info;
1483: GC_word free_len;
1484: if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1485: != sizeof(mem_info))
1486: ABORT("Weird VirtualQuery result");
1487: free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1488: if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1489: ABORT("VirtualFree failed");
1490: GC_unmapped_bytes += free_len;
1491: start_addr += free_len;
1492: len -= free_len;
1493: }
1494: # else
1495: if (munmap(start_addr, len) != 0) ABORT("munmap failed");
1496: GC_unmapped_bytes += len;
1497: # endif
1498: }
1499:
1500:
/* Re-establish the mapping previously removed by GC_unmap on the */
/* same (start, bytes) range: MEM_COMMIT per region on Win32/WinCE, */
/* a MAP_FIXED mmap of /dev/zero elsewhere. Aborts if the OS does */
/* not give the memory back at the same address. */
void GC_remap(ptr_t start, word bytes)
1502: {
1503: static int zero_descr = -1;
1504: ptr_t start_addr = GC_unmap_start(start, bytes);
1505: ptr_t end_addr = GC_unmap_end(start, bytes);
1506: word len = end_addr - start_addr;
1507: ptr_t result;
1508:
1.4 noro 1509: # if defined(MSWIN32) || defined(MSWINCE)
1510: if (0 == start_addr) return;
1511: while (len != 0) {
1512: MEMORY_BASIC_INFORMATION mem_info;
1513: GC_word alloc_len;
1514: if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1515: != sizeof(mem_info))
1516: ABORT("Weird VirtualQuery result");
1517: alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1518: result = VirtualAlloc(start_addr, alloc_len,
1519: MEM_COMMIT,
1520: PAGE_EXECUTE_READWRITE);
1521: if (result != start_addr) {
1522: ABORT("VirtualAlloc remapping failed");
1523: }
1524: GC_unmapped_bytes -= alloc_len;
1525: start_addr += alloc_len;
1526: len -= alloc_len;
1527: }
1528: # else
/* NOTE(review): open() result is unchecked; if it fails, the */
/* subsequent mmap with fd == -1 will fail and we abort below. */
1529: if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
1530: if (0 == start_addr) return;
1531: result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1532: MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
1533: if (result != start_addr) {
1534: ABORT("mmap remapping failed");
1535: }
1536: GC_unmapped_bytes -= len;
1537: # endif
1.1 noro 1538: }
1539:
1540: /* Two adjacent blocks have already been unmapped and are about to */
1541: /* be merged. Unmap the whole block. This typically requires */
1542: /* that we unmap a small section in the middle that was not previously */
1543: /* unmapped due to alignment constraints. */
/* Two adjacent blocks have already been unmapped and are about to */
/* be merged. Unmap the whole block. This typically requires */
/* that we unmap a small section in the middle that was not previously */
/* unmapped due to alignment constraints. The gap runs from the */
/* rounded-down end of block 1 to the rounded-up start of block 2; */
/* if either block contained no whole page, the bounds are recomputed */
/* over the merged extent. */
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1545: {
1546: ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1547: ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1548: ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1549: ptr_t end2_addr = GC_unmap_end(start2, bytes2);
1550: ptr_t start_addr = end1_addr;
1551: ptr_t end_addr = start2_addr;
1552: word len;
1553: GC_ASSERT(start1 + bytes1 == start2);
1554: if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1555: if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1556: if (0 == start_addr) return;
1557: len = end_addr - start_addr;
1.4 noro 1558: # if defined(MSWIN32) || defined(MSWINCE)
1559: while (len != 0) {
1560: MEMORY_BASIC_INFORMATION mem_info;
1561: GC_word free_len;
1562: if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1563: != sizeof(mem_info))
1564: ABORT("Weird VirtualQuery result");
1565: free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1566: if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1567: ABORT("VirtualFree failed");
1568: GC_unmapped_bytes += free_len;
1569: start_addr += free_len;
1570: len -= free_len;
1571: }
1572: # else
1573: if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1574: GC_unmapped_bytes += len;
1575: # endif
1.1 noro 1576: }
1577:
1578: #endif /* USE_MUNMAP */
1579:
1580: /* Routine for pushing any additional roots. In THREADS */
1581: /* environment, this is also responsible for marking from */
1.4 noro 1582: /* thread stacks. */
1.1 noro 1583: #ifndef THREADS
1584: void (*GC_push_other_roots)() = 0;
1585: #else /* THREADS */
1586:
1587: # ifdef PCR
/* PCR callback: query one thread's stack bounds via */
/* PCR_ThCtl_GetInfo and push that range as GC stack data. */
/* The PCR_ThCtl_GetInfo result code is returned to the caller. */
PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1589: {
1590: struct PCR_ThCtl_TInfoRep info;
1591: PCR_ERes result;
1592:
1593: info.ti_stkLow = info.ti_stkHi = 0;
1594: result = PCR_ThCtl_GetInfo(t, &info);
1595: GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1596: return(result);
1597: }
1598:
1599: /* Push the contents of an old object. We treat this as stack */
1600: /* data only because that makes it robust against mark stack */
1601: /* overflow. */
/* PCR enumeration callback: push one object inherited from a */
/* previous allocator. Treated as stack data for robustness */
/* against mark stack overflow (see comment above). */
PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1603: {
1604: GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1605: return(PCR_ERes_okay);
1606: }
1607:
1608:
/* PCR variant of the extra-roots hook: enumerate objects still */
/* owned by the previous memory manager, then push every thread's */
/* stack (including the current thread's). Aborts on enumeration */
/* or thread-traversal failure. */
1.4 noro 1609: void GC_default_push_other_roots GC_PROTO((void))
1.1 noro 1610: {
1611: /* Traverse data allocated by previous memory managers. */
1612: {
1613: extern struct PCR_MM_ProcsRep * GC_old_allocator;
1614:
1615: if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1616: GC_push_old_obj, 0)
1617: != PCR_ERes_okay) {
1618: ABORT("Old object enumeration failed");
1619: }
1620: }
1621: /* Traverse all thread stacks. */
1622: if (PCR_ERes_IsErr(
1623: PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1624: || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1625: ABORT("Thread stack marking failed\n");
1626: }
1627: }
1628:
1629: # endif /* PCR */
1630:
1631: # ifdef SRC_M3
1632:
1633: # ifdef ALL_INTERIOR_POINTERS
1634: --> misconfigured
1635: # endif
1636:
/* SRC_M3: intentionally empty — the M3 runtime owns and pushes */
/* its thread structures itself. */
1.4 noro 1637: void GC_push_thread_structures GC_PROTO((void))
1638: {
1639: /* Not our responsibility. */
1640: }
1.1 noro 1641:
1642: extern void ThreadF__ProcessStacks();
1643:
/* SRC_M3 callback (passed to ThreadF__ProcessStacks): push one */
/* stack range; stop is inclusive, hence the + sizeof(word). */
void GC_push_thread_stack(start, stop)
1645: word start, stop;
1646: {
1647: GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
1648: }
1649:
1650: /* Push routine with M3 specific calling convention. */
1651: GC_m3_push_root(dummy1, p, dummy2, dummy3)
1652: word *p;
1653: ptr_t dummy1, dummy2;
1654: int dummy3;
1655: {
1656: word q = *p;
1657:
1.4 noro 1658: GC_PUSH_ONE_STACK(q, p);
1.1 noro 1659: }
1660:
1661: /* M3 set equivalent to RTHeap.TracedRefTypes */
1662: typedef struct { int elts[1]; } RefTypeSet;
1663: RefTypeSet GC_TracedRefTypes = {{0x1}};
1664:
/* SRC_M3 variant of the extra-roots hook: let the M3 runtime */
/* enumerate traced globals, then its thread stacks. The */
/* GC_words_allocd guard avoids startup-ordering problems. */
1.4 noro 1665: void GC_default_push_other_roots GC_PROTO((void))
1.1 noro 1666: {
1.4 noro 1667: /* Use the M3 provided routine for finding static roots. */
1668: /* This is a bit dubious, since it presumes no C roots. */
1669: /* We handle the collector roots explicitly in GC_push_roots */
1670: RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
1.1 noro 1671: if (GC_words_allocd > 0) {
1672: ThreadF__ProcessStacks(GC_push_thread_stack);
1673: }
1674: /* Otherwise this isn't absolutely necessary, and we have */
1675: /* startup ordering problems. */
1676: }
1677:
1678: # endif /* SRC_M3 */
1679:
1.6 ! noro 1680: # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
! 1681: defined(GC_WIN32_THREADS)
1.1 noro 1682:
1683: extern void GC_push_all_stacks();
1684:
/* Threads variant of the extra-roots hook: simply delegate to the */
/* thread package's stack-pushing routine. */
1.4 noro 1685: void GC_default_push_other_roots GC_PROTO((void))
1.1 noro 1686: {
1687: GC_push_all_stacks();
1688: }
1689:
1.6 ! noro 1690: # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
1.1 noro 1691:
1.4 noro 1692: void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
1.1 noro 1693:
1.6 ! noro 1694: #endif /* THREADS */
1.1 noro 1695:
1696: /*
1697: * Routines for accessing dirty bits on virtual pages.
1.4 noro 1698: * We plan to eventually implement four strategies for doing so:
1.1 noro 1699: * DEFAULT_VDB: A simple dummy implementation that treats every page
1700: * as possibly dirty. This makes incremental collection
1701: * useless, but the implementation is still correct.
1702: * PCR_VDB: Use PPCRs virtual dirty bit facility.
1703: * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1704: * works under some SVR4 variants. Even then, it may be
1705: * too slow to be entirely satisfactory. Requires reading
1706: * dirty bits for entire address space. Implementations tend
1707: * to assume that the client is a (slow) debugger.
1708: * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1709: * dirtied pages. The implementation (and implementability)
1710: * is highly system dependent. This usually fails when system
1711: * calls write to a protected page. We prevent the read system
1712: * call from doing so. It is the clients responsibility to
1713: * make sure that other system calls are similarly protected
1714: * or write only to the stack.
1715: */
1716:
1717: GC_bool GC_dirty_maintained = FALSE;
1718:
1719: # ifdef DEFAULT_VDB
1720:
1721: /* All of the following assume the allocation lock is held, and */
1722: /* signals are disabled. */
1723:
1724: /* The client asserts that unallocated pages in the heap are never */
1725: /* written. */
1726:
1727: /* Initialize virtual dirty bit implementation. */
/* Initialize virtual dirty bit implementation (DEFAULT_VDB: */
/* nothing to set up beyond recording that it is active). */
void GC_dirty_init()
1729: {
1730: GC_dirty_maintained = TRUE;
1731: }
1732:
1733: /* Retrieve system dirty bits for heap to a local buffer. */
1734: /* Restore the system's notion of which pages are dirty. */
/* DEFAULT_VDB: no system dirty bits to read — no-op. */
void GC_read_dirty()
1736: {}
1737:
1738: /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1739: /* If the actual page size is different, this returns TRUE if any */
1740: /* of the pages overlapping h are dirty. This routine may err on the */
1741: /* side of labelling pages as dirty (and this implementation does). */
1742: /*ARGSUSED*/
/* DEFAULT_VDB: conservatively report every page as dirty (see the */
/* interface comment above; erring on the dirty side is correct). */
GC_bool GC_page_was_dirty(h)
1744: struct hblk *h;
1745: {
1746: return(TRUE);
1747: }
1748:
1749: /*
1750: * The following two routines are typically less crucial. They matter
1751: * most with large dynamic libraries, or if we can't accurately identify
1752: * stacks, e.g. under Solaris 2.X. Otherwise the following default
1753: * versions are adequate.
1754: */
1755:
1756: /* Could any valid GC heap pointer ever have been written to this page? */
1757: /*ARGSUSED*/
/* DEFAULT_VDB: conservatively report every page as ever-dirty. */
GC_bool GC_page_was_ever_dirty(h)
1759: struct hblk *h;
1760: {
1761: return(TRUE);
1762: }
1763:
1764: /* Reset the n pages starting at h to "was never dirty" status. */
/* DEFAULT_VDB: "reset n pages at h to never-dirty" is a no-op, */
/* consistent with GC_page_was_ever_dirty always returning TRUE. */
void GC_is_fresh(h, n)
1766: struct hblk *h;
1767: word n;
1768: {
1769: }
1770:
1.6 ! noro 1771: /* A call that: */
! 1772: /* I) hints that [h, h+nblocks) is about to be written. */
! 1773: /* II) guarantees that protection is removed. */
! 1774: /* (I) may speed up some dirty bit implementations. */
! 1775: /* (II) may be essential if we need to ensure that */
! 1776: /* pointer-free system call buffers in the heap are */
! 1777: /* not protected. */
1.1 noro 1778: /*ARGSUSED*/
/* DEFAULT_VDB: no pages are ever protected, so there is no */
/* protection to remove — no-op (see contract comment above). */
1.6 ! noro 1779: void GC_remove_protection(h, nblocks, is_ptrfree)
1.1 noro 1780: struct hblk *h;
1.6 ! noro 1781: word nblocks;
! 1782: GC_bool is_ptrfree;
1.1 noro 1783: {
1784: }
1785:
1786: # endif /* DEFAULT_VDB */
1787:
1788:
1789: # ifdef MPROTECT_VDB
1790:
1791: /*
1792: * See DEFAULT_VDB for interface descriptions.
1793: */
1794:
1795: /*
1796: * This implementation maintains dirty bits itself by catching write
1797: * faults and keeping track of them. We assume nobody else catches
1.6 ! noro 1798: * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
! 1799: * This means that clients must ensure that system calls don't write
! 1800: * to the write-protected heap. Probably the best way to do this is to
! 1801: * ensure that system calls write at most to POINTERFREE objects in the
! 1802: * heap, and do even that only if we are on a platform on which those
! 1803: * are not protected. Another alternative is to wrap system calls
! 1804: * (see example for read below), but the current implementation holds
! 1805: * a lock across blocking calls, making it problematic for multithreaded
! 1806: * applications.
1.1 noro 1807: * We assume the page size is a multiple of HBLKSIZE.
1.6 ! noro 1808: * We prefer them to be the same. We avoid protecting POINTERFREE
! 1809: * objects only if they are the same.
1.1 noro 1810: */
1811:
1.4 noro 1812: # if !defined(MSWIN32) && !defined(MSWINCE)
1.1 noro 1813:
1814: # include <sys/mman.h>
1815: # include <signal.h>
1816: # include <sys/syscall.h>
1817:
1818: # define PROTECT(addr, len) \
1.2 noro 1819: if (mprotect((caddr_t)(addr), (size_t)(len), \
1.1 noro 1820: PROT_READ | OPT_PROT_EXEC) < 0) { \
1821: ABORT("mprotect failed"); \
1822: }
1823: # define UNPROTECT(addr, len) \
1.2 noro 1824: if (mprotect((caddr_t)(addr), (size_t)(len), \
1.1 noro 1825: PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1826: ABORT("un-mprotect failed"); \
1827: }
1828:
1829: # else
1830:
1.4 noro 1831: # ifndef MSWINCE
1832: # include <signal.h>
1833: # endif
1.1 noro 1834:
1835: static DWORD protect_junk;
1836: # define PROTECT(addr, len) \
1837: if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1838: &protect_junk)) { \
1839: DWORD last_error = GetLastError(); \
1840: GC_printf1("Last error code: %lx\n", last_error); \
1841: ABORT("VirtualProtect failed"); \
1842: }
1843: # define UNPROTECT(addr, len) \
1844: if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1845: &protect_junk)) { \
1846: ABORT("un-VirtualProtect failed"); \
1847: }
1848:
1849: # endif
1850:
1851: #if defined(SUNOS4) || defined(FREEBSD)
1852: typedef void (* SIG_PF)();
1853: #endif
1.6 ! noro 1854: #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
! 1855: || defined(MACOSX) || defined(HURD)
1.2 noro 1856: # ifdef __STDC__
1.1 noro 1857: typedef void (* SIG_PF)(int);
1.2 noro 1858: # else
1859: typedef void (* SIG_PF)();
1860: # endif
1.1 noro 1861: #endif
1862: #if defined(MSWIN32)
1863: typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
1864: # undef SIG_DFL
1865: # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1866: #endif
1.4 noro 1867: #if defined(MSWINCE)
1868: typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
1869: # undef SIG_DFL
1870: # define SIG_DFL (SIG_PF) (-1)
1871: #endif
1.1 noro 1872:
1.6 ! noro 1873: #if defined(IRIX5) || defined(OSF1) || defined(HURD)
1.1 noro 1874: typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1875: #endif
1876: #if defined(SUNOS5SIGS)
1.2 noro 1877: # ifdef HPUX
1878: # define SIGINFO __siginfo
1879: # else
1880: # define SIGINFO siginfo
1881: # endif
1882: # ifdef __STDC__
1883: typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
1884: # else
1885: typedef void (* REAL_SIG_PF)();
1886: # endif
1.1 noro 1887: #endif
1888: #if defined(LINUX)
1.6 ! noro 1889: # if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
1.1 noro 1890: typedef struct sigcontext s_c;
1.6 ! noro 1891: # else /* glibc < 2.2 */
! 1892: # include <linux/version.h>
! 1893: # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
! 1894: typedef struct sigcontext s_c;
! 1895: # else
! 1896: typedef struct sigcontext_struct s_c;
! 1897: # endif
! 1898: # endif /* glibc < 2.2 */
1.2 noro 1899: # if defined(ALPHA) || defined(M68K)
1900: typedef void (* REAL_SIG_PF)(int, int, s_c *);
1901: # else
1.4 noro 1902: # if defined(IA64) || defined(HP_PA)
1.2 noro 1903: typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
1904: # else
1905: typedef void (* REAL_SIG_PF)(int, s_c);
1906: # endif
1907: # endif
1.1 noro 1908: # ifdef ALPHA
1909: /* Retrieve fault address from sigcontext structure by decoding */
1910: /* instruction. */
/* Alpha: recover the faulting address by decoding the store */
/* instruction at sc_pc — base register (bits 16-20) plus the */
/* sign-extended 16-bit displacement in the low halfword. */
char * get_fault_addr(s_c *sc) {
1912: unsigned instr;
1913: word faultaddr;
1914:
1915: instr = *((unsigned *)(sc->sc_pc));
1916: faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
/* Sign-extend the displacement via shift left then arithmetic */
/* shift right. */
1917: faultaddr += (word) (((int)instr << 16) >> 16);
1918: return (char *)faultaddr;
1919: }
1920: # endif /* !ALPHA */
1921: # endif
1922:
1.4 noro 1923: # if defined(MACOSX) /* Should also test for PowerPC? */
1924: typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1925:
1926: /* Decodes the machine instruction which was responsible for the sending of the
1927: SIGBUS signal. Sadly this is the only way to find the faulting address because
1928: the signal handler doesn't get it directly from the kernel (although it is
1929: available on the Mach level, but dropped by the BSD personality before it
1930: calls our signal handler...)
1931: This code should be able to deal correctly with all PPCs starting from the
1932: 601 up to and including the G4s (including Velocity Engine). */
1933: #define EXTRACT_OP1(iw) (((iw) & 0xFC000000) >> 26)
1934: #define EXTRACT_OP2(iw) (((iw) & 0x000007FE) >> 1)
1935: #define EXTRACT_REGA(iw) (((iw) & 0x001F0000) >> 16)
1936: #define EXTRACT_REGB(iw) (((iw) & 0x03E00000) >> 21)
1937: #define EXTRACT_REGC(iw) (((iw) & 0x0000F800) >> 11)
1938: #define EXTRACT_DISP(iw) ((short *) &(iw))[1]
1939:
1940: static char *get_fault_addr(struct sigcontext *scp)
1941: {
1942: unsigned int instr = *((unsigned int *) scp->sc_ir);
1943: unsigned int * regs = &((unsigned int *) scp->sc_regs)[2];
1944: int disp = 0, tmp;
1945: unsigned int baseA = 0, baseB = 0;
1946: unsigned int addr, alignmask = 0xFFFFFFFF;
1947:
1948: #ifdef GC_DEBUG_DECODER
1949: GC_err_printf1("Instruction: 0x%lx\n", instr);
1950: GC_err_printf1("Opcode 1: d\n", (int)EXTRACT_OP1(instr));
1951: #endif
1952: switch(EXTRACT_OP1(instr)) {
1953: case 38: /* stb */
1954: case 39: /* stbu */
1955: case 54: /* stfd */
1956: case 55: /* stfdu */
1957: case 52: /* stfs */
1958: case 53: /* stfsu */
1959: case 44: /* sth */
1960: case 45: /* sthu */
1961: case 47: /* stmw */
1962: case 36: /* stw */
1963: case 37: /* stwu */
1964: tmp = EXTRACT_REGA(instr);
1965: if(tmp > 0)
1966: baseA = regs[tmp];
1967: disp = EXTRACT_DISP(instr);
1968: break;
1969: case 31:
1970: #ifdef GC_DEBUG_DECODER
1971: GC_err_printf1("Opcode 2: %d\n", (int)EXTRACT_OP2(instr));
1972: #endif
1973: switch(EXTRACT_OP2(instr)) {
1974: case 86: /* dcbf */
1975: case 54: /* dcbst */
1976: case 1014: /* dcbz */
1977: case 247: /* stbux */
1978: case 215: /* stbx */
1979: case 759: /* stfdux */
1980: case 727: /* stfdx */
1981: case 983: /* stfiwx */
1982: case 695: /* stfsux */
1983: case 663: /* stfsx */
1984: case 918: /* sthbrx */
1985: case 439: /* sthux */
1986: case 407: /* sthx */
1987: case 661: /* stswx */
1988: case 662: /* stwbrx */
1989: case 150: /* stwcx. */
1990: case 183: /* stwux */
1991: case 151: /* stwx */
1992: case 135: /* stvebx */
1993: case 167: /* stvehx */
1994: case 199: /* stvewx */
1995: case 231: /* stvx */
1996: case 487: /* stvxl */
1997: tmp = EXTRACT_REGA(instr);
1998: if(tmp > 0)
1999: baseA = regs[tmp];
2000: baseB = regs[EXTRACT_REGC(instr)];
2001: /* determine Altivec alignment mask */
2002: switch(EXTRACT_OP2(instr)) {
2003: case 167: /* stvehx */
2004: alignmask = 0xFFFFFFFE;
2005: break;
2006: case 199: /* stvewx */
2007: alignmask = 0xFFFFFFFC;
2008: break;
2009: case 231: /* stvx */
2010: alignmask = 0xFFFFFFF0;
2011: break;
2012: case 487: /* stvxl */
2013: alignmask = 0xFFFFFFF0;
2014: break;
2015: }
2016: break;
2017: case 725: /* stswi */
2018: tmp = EXTRACT_REGA(instr);
2019: if(tmp > 0)
2020: baseA = regs[tmp];
2021: break;
2022: default: /* ignore instruction */
2023: #ifdef GC_DEBUG_DECODER
2024: GC_err_printf("Ignored by inner handler\n");
2025: #endif
2026: return NULL;
2027: break;
2028: }
2029: break;
2030: default: /* ignore instruction */
2031: #ifdef GC_DEBUG_DECODER
2032: GC_err_printf("Ignored by main handler\n");
2033: #endif
2034: return NULL;
2035: break;
2036: }
2037:
2038: addr = (baseA + baseB) + disp;
2039: addr &= alignmask;
2040: #ifdef GC_DEBUG_DECODER
2041: GC_err_printf1("BaseA: %d\n", baseA);
2042: GC_err_printf1("BaseB: %d\n", baseB);
2043: GC_err_printf1("Disp: %d\n", disp);
2044: GC_err_printf1("Address: %d\n", addr);
2045: #endif
2046: return (char *)addr;
2047: }
2048: #endif /* MACOSX */
2049:
1.1 noro 2050: SIG_PF GC_old_bus_handler;
2051: SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
2052:
1.4 noro 2053: #ifdef THREADS
2054: /* We need to lock around the bitmap update in the write fault handler */
2055: /* in order to avoid the risk of losing a bit. We do this with a */
2056: /* test-and-set spin lock if we know how to do that. Otherwise we */
2057: /* check whether we are already in the handler and use the dumb but */
2058: /* safe fallback algorithm of setting all bits in the word. */
2059: /* Contention should be very rare, so we do the minimum to handle it */
2060: /* correctly. */
2061: #ifdef GC_TEST_AND_SET_DEFINED
2062: static VOLATILE unsigned int fault_handler_lock = 0;
2063: void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
1.6 ! noro 2064: while (GC_test_and_set(&fault_handler_lock)) {}
1.4 noro 2065: /* Could also revert to set_pht_entry_from_index_safe if initial */
2066: /* GC_test_and_set fails. */
2067: set_pht_entry_from_index(db, index);
2068: GC_clear(&fault_handler_lock);
2069: }
2070: #else /* !GC_TEST_AND_SET_DEFINED */
2071: /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2072: /* just before we notice the conflict and correct it. We may end up */
2073: /* looking at it while it's wrong. But this requires contention */
2074: /* exactly when a GC is triggered, which seems far less likely to */
2075: /* fail than the old code, which had no reported failures. Thus we */
2076: /* leave it this way while we think of something better, or support */
2077: /* GC_test_and_set on the remaining platforms. */
2078: static VOLATILE word currently_updating = 0;
2079: void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2080: unsigned int update_dummy;
2081: currently_updating = (word)(&update_dummy);
2082: set_pht_entry_from_index(db, index);
2083: /* If we get contention in the 10 or so instruction window here, */
2084: /* and we get stopped by a GC between the two updates, we lose! */
2085: if (currently_updating != (word)(&update_dummy)) {
2086: set_pht_entry_from_index_safe(db, index);
2087: /* We claim that if two threads concurrently try to update the */
2088: /* dirty bit vector, the first one to execute UPDATE_START */
2089: /* will see it changed when UPDATE_END is executed. (Note that */
2090: /* &update_dummy must differ in two distinct threads.) It */
2091: /* will then execute set_pht_entry_from_index_safe, thus */
2092: /* returning us to a safe state, though not soon enough. */
2093: }
2094: }
2095: #endif /* !GC_TEST_AND_SET_DEFINED */
2096: #else /* !THREADS */
2097: # define async_set_pht_entry_from_index(db, index) \
2098: set_pht_entry_from_index(db, index)
2099: #endif /* !THREADS */
2100:
1.1 noro 2101: /*ARGSUSED*/
2102: # if defined (SUNOS4) || defined(FREEBSD)
2103: void GC_write_fault_handler(sig, code, scp, addr)
2104: int sig, code;
2105: struct sigcontext *scp;
2106: char * addr;
2107: # ifdef SUNOS4
2108: # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2109: # define CODE_OK (FC_CODE(code) == FC_PROT \
2110: || (FC_CODE(code) == FC_OBJERR \
2111: && FC_ERRNO(code) == FC_PROT))
2112: # endif
2113: # ifdef FREEBSD
2114: # define SIG_OK (sig == SIGBUS)
2115: # define CODE_OK (code == BUS_PAGE_FAULT)
2116: # endif
2117: # endif
1.6 ! noro 2118: # if defined(IRIX5) || defined(OSF1) || defined(HURD)
1.1 noro 2119: # include <errno.h>
2120: void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2121: # ifdef OSF1
1.6 ! noro 2122: # define SIG_OK (sig == SIGSEGV)
1.1 noro 2123: # define CODE_OK (code == 2 /* experimentally determined */)
2124: # endif
2125: # ifdef IRIX5
1.6 ! noro 2126: # define SIG_OK (sig == SIGSEGV)
1.1 noro 2127: # define CODE_OK (code == EACCES)
2128: # endif
1.6 ! noro 2129: # ifdef HURD
! 2130: # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
! 2131: # define CODE_OK TRUE
! 2132: # endif
1.1 noro 2133: # endif
2134: # if defined(LINUX)
1.2 noro 2135: # if defined(ALPHA) || defined(M68K)
1.1 noro 2136: void GC_write_fault_handler(int sig, int code, s_c * sc)
2137: # else
1.4 noro 2138: # if defined(IA64) || defined(HP_PA)
1.2 noro 2139: void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
2140: # else
1.6 ! noro 2141: # if defined(ARM32)
! 2142: void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
! 2143: # else
! 2144: void GC_write_fault_handler(int sig, s_c sc)
! 2145: # endif
1.2 noro 2146: # endif
1.1 noro 2147: # endif
2148: # define SIG_OK (sig == SIGSEGV)
2149: # define CODE_OK TRUE
1.2 noro 2150: /* Empirically c.trapno == 14, on IA32, but is that useful? */
2151: /* Should probably consider alignment issues on other */
2152: /* architectures. */
1.1 noro 2153: # endif
2154: # if defined(SUNOS5SIGS)
1.2 noro 2155: # ifdef __STDC__
2156: void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
2157: # else
2158: void GC_write_fault_handler(sig, scp, context)
2159: int sig;
2160: struct SIGINFO *scp;
2161: void * context;
2162: # endif
2163: # ifdef HPUX
2164: # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2165: # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2166: || (scp -> si_code == BUS_ADRERR) \
2167: || (scp -> si_code == BUS_UNKNOWN) \
2168: || (scp -> si_code == SEGV_UNKNOWN) \
2169: || (scp -> si_code == BUS_OBJERR)
2170: # else
2171: # define SIG_OK (sig == SIGSEGV)
2172: # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2173: # endif
1.1 noro 2174: # endif
1.4 noro 2175:
2176: # if defined(MACOSX)
2177: void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2178: # define SIG_OK (sig == SIGBUS)
2179: # define CODE_OK (code == 0 /* experimentally determined */)
2180: # endif
2181:
2182: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 2183: LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
2184: # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
1.4 noro 2185: STATUS_ACCESS_VIOLATION)
1.1 noro 2186: # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
2187: /* Write fault */
2188: # endif
2189: {
2190: register unsigned i;
1.6 ! noro 2191: # if defined(HURD)
! 2192: char *addr = (char *) code;
! 2193: # endif
1.1 noro 2194: # ifdef IRIX5
2195: char * addr = (char *) (size_t) (scp -> sc_badvaddr);
2196: # endif
2197: # if defined(OSF1) && defined(ALPHA)
2198: char * addr = (char *) (scp -> sc_traparg_a0);
2199: # endif
2200: # ifdef SUNOS5SIGS
2201: char * addr = (char *) (scp -> si_addr);
2202: # endif
2203: # ifdef LINUX
2204: # ifdef I386
2205: char * addr = (char *) (sc.cr2);
2206: # else
2207: # if defined(M68K)
2208: char * addr = NULL;
2209:
1.4 noro 2210: struct sigcontext *scp = (struct sigcontext *)(sc);
1.1 noro 2211:
2212: int format = (scp->sc_formatvec >> 12) & 0xf;
2213: unsigned long *framedata = (unsigned long *)(scp + 1);
2214: unsigned long ea;
2215:
2216: if (format == 0xa || format == 0xb) {
2217: /* 68020/030 */
2218: ea = framedata[2];
2219: } else if (format == 7) {
2220: /* 68040 */
2221: ea = framedata[3];
1.4 noro 2222: if (framedata[1] & 0x08000000) {
2223: /* correct addr on misaligned access */
2224: ea = (ea+4095)&(~4095);
2225: }
1.1 noro 2226: } else if (format == 4) {
2227: /* 68060 */
2228: ea = framedata[0];
2229: if (framedata[1] & 0x08000000) {
2230: /* correct addr on misaligned access */
2231: ea = (ea+4095)&(~4095);
2232: }
2233: }
2234: addr = (char *)ea;
2235: # else
2236: # ifdef ALPHA
2237: char * addr = get_fault_addr(sc);
2238: # else
1.4 noro 2239: # if defined(IA64) || defined(HP_PA)
1.2 noro 2240: char * addr = si -> si_addr;
1.3 noro 2241: /* I believe this is claimed to work on all platforms for */
2242: /* Linux 2.3.47 and later. Hopefully we don't have to */
2243: /* worry about earlier kernels on IA64. */
1.2 noro 2244: # else
2245: # if defined(POWERPC)
2246: char * addr = (char *) (sc.regs->dar);
2247: # else
1.6 ! noro 2248: # if defined(ARM32)
! 2249: char * addr = (char *)sc.fault_address;
! 2250: # else
! 2251: --> architecture not supported
! 2252: # endif
1.2 noro 2253: # endif
2254: # endif
1.1 noro 2255: # endif
2256: # endif
2257: # endif
2258: # endif
1.4 noro 2259: # if defined(MACOSX)
2260: char * addr = get_fault_addr(scp);
2261: # endif
2262: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 2263: char * addr = (char *) (exc_info -> ExceptionRecord
2264: -> ExceptionInformation[1]);
2265: # define sig SIGSEGV
2266: # endif
2267:
2268: if (SIG_OK && CODE_OK) {
2269: register struct hblk * h =
2270: (struct hblk *)((word)addr & ~(GC_page_size-1));
2271: GC_bool in_allocd_block;
2272:
2273: # ifdef SUNOS5SIGS
2274: /* Address is only within the correct physical page. */
2275: in_allocd_block = FALSE;
2276: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2277: if (HDR(h+i) != 0) {
2278: in_allocd_block = TRUE;
2279: }
2280: }
2281: # else
2282: in_allocd_block = (HDR(addr) != 0);
2283: # endif
2284: if (!in_allocd_block) {
2285: /* Heap blocks now begin and end on page boundaries */
2286: SIG_PF old_handler;
2287:
2288: if (sig == SIGSEGV) {
2289: old_handler = GC_old_segv_handler;
2290: } else {
2291: old_handler = GC_old_bus_handler;
2292: }
2293: if (old_handler == SIG_DFL) {
1.4 noro 2294: # if !defined(MSWIN32) && !defined(MSWINCE)
1.1 noro 2295: GC_err_printf1("Segfault at 0x%lx\n", addr);
2296: ABORT("Unexpected bus error or segmentation fault");
2297: # else
2298: return(EXCEPTION_CONTINUE_SEARCH);
2299: # endif
2300: } else {
2301: # if defined (SUNOS4) || defined(FREEBSD)
2302: (*old_handler) (sig, code, scp, addr);
2303: return;
2304: # endif
2305: # if defined (SUNOS5SIGS)
2306: (*(REAL_SIG_PF)old_handler) (sig, scp, context);
2307: return;
2308: # endif
2309: # if defined (LINUX)
1.2 noro 2310: # if defined(ALPHA) || defined(M68K)
1.1 noro 2311: (*(REAL_SIG_PF)old_handler) (sig, code, sc);
2312: # else
1.4 noro 2313: # if defined(IA64) || defined(HP_PA)
1.2 noro 2314: (*(REAL_SIG_PF)old_handler) (sig, si, scp);
2315: # else
1.1 noro 2316: (*(REAL_SIG_PF)old_handler) (sig, sc);
1.2 noro 2317: # endif
1.1 noro 2318: # endif
2319: return;
2320: # endif
1.6 ! noro 2321: # if defined (IRIX5) || defined(OSF1) || defined(HURD)
1.1 noro 2322: (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2323: return;
2324: # endif
1.4 noro 2325: # ifdef MACOSX
2326: (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2327: # endif
1.1 noro 2328: # ifdef MSWIN32
2329: return((*old_handler)(exc_info));
2330: # endif
2331: }
2332: }
1.6 ! noro 2333: UNPROTECT(h, GC_page_size);
! 2334: /* We need to make sure that no collection occurs between */
! 2335: /* the UNPROTECT and the setting of the dirty bit. Otherwise */
! 2336: /* a write by a third thread might go unnoticed. Reversing */
! 2337: /* the order is just as bad, since we would end up unprotecting */
! 2338: /* a page in a GC cycle during which it's not marked. */
! 2339: /* Currently we do this by disabling the thread stopping */
! 2340: /* signals while this handler is running. An alternative might */
! 2341: /* be to record the fact that we're about to unprotect, or */
! 2342: /* have just unprotected a page in the GC's thread structure, */
! 2343: /* and then to have the thread stopping code set the dirty */
! 2344: /* flag, if necessary. */
1.1 noro 2345: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2346: register int index = PHT_HASH(h+i);
2347:
1.4 noro 2348: async_set_pht_entry_from_index(GC_dirty_pages, index);
1.1 noro 2349: }
1.6 ! noro 2350: # if defined(OSF1)
1.1 noro 2351: /* These reset the signal handler each time by default. */
2352: signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
2353: # endif
2354: /* The write may not take place before dirty bits are read. */
2355: /* But then we'll fault again ... */
1.4 noro 2356: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 2357: return(EXCEPTION_CONTINUE_EXECUTION);
2358: # else
2359: return;
2360: # endif
2361: }
1.4 noro 2362: #if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 2363: return EXCEPTION_CONTINUE_SEARCH;
2364: #else
2365: GC_err_printf1("Segfault at 0x%lx\n", addr);
2366: ABORT("Unexpected bus error or segmentation fault");
2367: #endif
2368: }
2369:
2370: /*
2371: * We hold the allocation lock. We expect block h to be written
1.6 ! noro 2372: * shortly. Ensure that all pages containing any part of the n hblks
! 2373: * starting at h are no longer protected. If is_ptrfree is false,
! 2374: * also ensure that they will subsequently appear to be dirty.
1.1 noro 2375: */
1.6 ! noro 2376: void GC_remove_protection(h, nblocks, is_ptrfree)
1.1 noro 2377: struct hblk *h;
1.6 ! noro 2378: word nblocks;
! 2379: GC_bool is_ptrfree;
1.1 noro 2380: {
1.6 ! noro 2381: struct hblk * h_trunc; /* Truncated to page boundary */
! 2382: struct hblk * h_end; /* Page boundary following block end */
! 2383: struct hblk * current;
! 2384: GC_bool found_clean;
1.1 noro 2385:
2386: if (!GC_dirty_maintained) return;
2387: h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
1.6 ! noro 2388: h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
! 2389: & ~(GC_page_size-1));
1.1 noro 2390: found_clean = FALSE;
1.6 ! noro 2391: for (current = h_trunc; current < h_end; ++current) {
! 2392: int index = PHT_HASH(current);
1.1 noro 2393:
1.6 ! noro 2394: if (!is_ptrfree || current < h || current >= h + nblocks) {
1.4 noro 2395: async_set_pht_entry_from_index(GC_dirty_pages, index);
1.1 noro 2396: }
2397: }
1.6 ! noro 2398: UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
1.1 noro 2399: }
2400:
2401: void GC_dirty_init()
2402: {
1.6 ! noro 2403: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
! 2404: defined(OSF1) || defined(HURD)
1.4 noro 2405: struct sigaction act, oldact;
1.6 ! noro 2406: /* We should probably specify SA_SIGINFO for Linux, and handle */
! 2407: /* the different architectures more uniformly. */
! 2408: # if defined(IRIX5) || defined(LINUX) || defined(OSF1) || defined(HURD)
1.1 noro 2409: act.sa_flags = SA_RESTART;
1.6 ! noro 2410: act.sa_handler = (SIG_PF)GC_write_fault_handler;
1.4 noro 2411: # else
1.1 noro 2412: act.sa_flags = SA_RESTART | SA_SIGINFO;
2413: act.sa_sigaction = GC_write_fault_handler;
1.4 noro 2414: # endif
2415: (void)sigemptyset(&act.sa_mask);
1.6 ! noro 2416: # ifdef SIG_SUSPEND
! 2417: /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
! 2418: /* handler. This effectively makes the handler atomic w.r.t. */
! 2419: /* stopping the world for GC. */
! 2420: (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
! 2421: # endif /* SIG_SUSPEND */
1.4 noro 2422: # endif
2423: # if defined(MACOSX)
2424: struct sigaction act, oldact;
2425:
2426: act.sa_flags = SA_RESTART;
2427: act.sa_handler = GC_write_fault_handler;
2428: sigemptyset(&act.sa_mask);
1.1 noro 2429: # endif
2430: # ifdef PRINTSTATS
2431: GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2432: # endif
2433: GC_dirty_maintained = TRUE;
2434: if (GC_page_size % HBLKSIZE != 0) {
2435: GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2436: ABORT("Page size not multiple of HBLKSIZE");
2437: }
2438: # if defined(SUNOS4) || defined(FREEBSD)
2439: GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2440: if (GC_old_bus_handler == SIG_IGN) {
2441: GC_err_printf0("Previously ignored bus error!?");
2442: GC_old_bus_handler = SIG_DFL;
2443: }
2444: if (GC_old_bus_handler != SIG_DFL) {
2445: # ifdef PRINTSTATS
2446: GC_err_printf0("Replaced other SIGBUS handler\n");
2447: # endif
2448: }
2449: # endif
1.6 ! noro 2450: # if defined(SUNOS4)
1.1 noro 2451: GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2452: if (GC_old_segv_handler == SIG_IGN) {
2453: GC_err_printf0("Previously ignored segmentation violation!?");
2454: GC_old_segv_handler = SIG_DFL;
2455: }
2456: if (GC_old_segv_handler != SIG_DFL) {
2457: # ifdef PRINTSTATS
2458: GC_err_printf0("Replaced other SIGSEGV handler\n");
2459: # endif
2460: }
2461: # endif
1.6 ! noro 2462: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) \
! 2463: || defined(OSF1) || defined(HURD)
! 2464: /* SUNOS5SIGS includes HPUX */
! 2465: # if defined(GC_IRIX_THREADS)
1.1 noro 2466: sigaction(SIGSEGV, 0, &oldact);
2467: sigaction(SIGSEGV, &act, 0);
2468: # else
2469: sigaction(SIGSEGV, &act, &oldact);
2470: # endif
1.6 ! noro 2471: # if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
1.1 noro 2472: /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2473: /* sa_sigaction. */
2474: GC_old_segv_handler = oldact.sa_handler;
1.6 ! noro 2475: # else /* Irix 6.x or SUNOS5SIGS or LINUX */
1.1 noro 2476: if (oldact.sa_flags & SA_SIGINFO) {
2477: GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2478: } else {
2479: GC_old_segv_handler = oldact.sa_handler;
2480: }
2481: # endif
2482: if (GC_old_segv_handler == SIG_IGN) {
2483: GC_err_printf0("Previously ignored segmentation violation!?");
2484: GC_old_segv_handler = SIG_DFL;
2485: }
2486: if (GC_old_segv_handler != SIG_DFL) {
2487: # ifdef PRINTSTATS
2488: GC_err_printf0("Replaced other SIGSEGV handler\n");
2489: # endif
2490: }
1.4 noro 2491: # endif
1.6 ! noro 2492: # if defined(MACOSX) || defined(HPUX) || defined(LINUX) || defined(HURD)
1.4 noro 2493: sigaction(SIGBUS, &act, &oldact);
2494: GC_old_bus_handler = oldact.sa_handler;
2495: if (GC_old_bus_handler == SIG_IGN) {
2496: GC_err_printf0("Previously ignored bus error!?");
2497: GC_old_bus_handler = SIG_DFL;
2498: }
2499: if (GC_old_bus_handler != SIG_DFL) {
2500: # ifdef PRINTSTATS
2501: GC_err_printf0("Replaced other SIGBUS handler\n");
2502: # endif
2503: }
1.6 ! noro 2504: # endif /* MACOS || HPUX || LINUX */
1.1 noro 2505: # if defined(MSWIN32)
2506: GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2507: if (GC_old_segv_handler != NULL) {
2508: # ifdef PRINTSTATS
2509: GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2510: # endif
2511: } else {
2512: GC_old_segv_handler = SIG_DFL;
2513: }
2514: # endif
2515: }
2516:
1.6 ! noro 2517: int GC_incremental_protection_needs()
! 2518: {
! 2519: if (GC_page_size == HBLKSIZE) {
! 2520: return GC_PROTECTS_POINTER_HEAP;
! 2521: } else {
! 2522: return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
! 2523: }
! 2524: }
! 2525:
! 2526: #define HAVE_INCREMENTAL_PROTECTION_NEEDS
1.1 noro 2527:
1.6 ! noro 2528: #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
1.1 noro 2529:
1.6 ! noro 2530: #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
1.1 noro 2531: void GC_protect_heap()
2532: {
2533: ptr_t start;
2534: word len;
1.6 ! noro 2535: struct hblk * current;
! 2536: struct hblk * current_start; /* Start of block to be protected. */
! 2537: struct hblk * limit;
1.1 noro 2538: unsigned i;
1.6 ! noro 2539: GC_bool protect_all =
! 2540: (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
1.1 noro 2541: for (i = 0; i < GC_n_heap_sects; i++) {
2542: start = GC_heap_sects[i].hs_start;
2543: len = GC_heap_sects[i].hs_bytes;
1.6 ! noro 2544: if (protect_all) {
! 2545: PROTECT(start, len);
! 2546: } else {
! 2547: GC_ASSERT(PAGE_ALIGNED(len))
! 2548: GC_ASSERT(PAGE_ALIGNED(start))
! 2549: current_start = current = (struct hblk *)start;
! 2550: limit = (struct hblk *)(start + len);
! 2551: while (current < limit) {
! 2552: hdr * hhdr;
! 2553: word nhblks;
! 2554: GC_bool is_ptrfree;
! 2555:
! 2556: GC_ASSERT(PAGE_ALIGNED(current));
! 2557: GET_HDR(current, hhdr);
! 2558: if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
! 2559: /* This can happen only if we're at the beginning of a */
! 2560: /* heap segment, and a block spans heap segments. */
! 2561: /* We will handle that block as part of the preceding */
! 2562: /* segment. */
! 2563: GC_ASSERT(current_start == current);
! 2564: current_start = ++current;
! 2565: continue;
! 2566: }
! 2567: if (HBLK_IS_FREE(hhdr)) {
! 2568: GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
! 2569: nhblks = divHBLKSZ(hhdr -> hb_sz);
! 2570: is_ptrfree = TRUE; /* dirty on alloc */
! 2571: } else {
! 2572: nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
! 2573: is_ptrfree = IS_PTRFREE(hhdr);
! 2574: }
! 2575: if (is_ptrfree) {
! 2576: if (current_start < current) {
! 2577: PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
! 2578: }
! 2579: current_start = (current += nhblks);
! 2580: } else {
! 2581: current += nhblks;
! 2582: }
! 2583: }
! 2584: if (current_start < current) {
! 2585: PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
! 2586: }
! 2587: }
1.1 noro 2588: }
2589: }
2590:
/* We assume that either the world is stopped or it's OK to lose dirty	*/
/* bits while this is happening (as in GC_enable_incremental).		*/
2593: void GC_read_dirty()
2594: {
2595: BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2596: (sizeof GC_dirty_pages));
2597: BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2598: GC_protect_heap();
2599: }
2600:
2601: GC_bool GC_page_was_dirty(h)
2602: struct hblk * h;
2603: {
2604: register word index = PHT_HASH(h);
2605:
2606: return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2607: }
2608:
2609: /*
2610: * Acquiring the allocation lock here is dangerous, since this
2611: * can be called from within GC_call_with_alloc_lock, and the cord
2612: * package does so. On systems that allow nested lock acquisition, this
2613: * happens to work.
2614: * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2615: */
1.4 noro 2616:
2617: static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
1.1 noro 2618:
2619: void GC_begin_syscall()
2620: {
1.4 noro 2621: if (!I_HOLD_LOCK()) {
2622: LOCK();
2623: syscall_acquired_lock = TRUE;
2624: }
1.1 noro 2625: }
2626:
2627: void GC_end_syscall()
2628: {
1.4 noro 2629: if (syscall_acquired_lock) {
2630: syscall_acquired_lock = FALSE;
2631: UNLOCK();
2632: }
1.1 noro 2633: }
2634:
2635: void GC_unprotect_range(addr, len)
2636: ptr_t addr;
2637: word len;
2638: {
2639: struct hblk * start_block;
2640: struct hblk * end_block;
2641: register struct hblk *h;
2642: ptr_t obj_start;
2643:
1.6 ! noro 2644: if (!GC_dirty_maintained) return;
1.1 noro 2645: obj_start = GC_base(addr);
2646: if (obj_start == 0) return;
2647: if (GC_base(addr + len - 1) != obj_start) {
2648: ABORT("GC_unprotect_range(range bigger than object)");
2649: }
2650: start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2651: end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
2652: end_block += GC_page_size/HBLKSIZE - 1;
2653: for (h = start_block; h <= end_block; h++) {
2654: register word index = PHT_HASH(h);
2655:
1.4 noro 2656: async_set_pht_entry_from_index(GC_dirty_pages, index);
1.1 noro 2657: }
2658: UNPROTECT(start_block,
2659: ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2660: }
2661:
1.6 ! noro 2662: #if 0
! 2663:
! 2664: /* We no longer wrap read by default, since that was causing too many */
! 2665: /* problems. It is preferred that the client instead avoids writing */
! 2666: /* to the write-protected heap with a system call. */
! 2667: /* This still serves as sample code if you do want to wrap system calls.*/
! 2668:
! 2669: #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
! 2670: /* Replacement for UNIX system call. */
! 2671: /* Other calls that write to the heap should be handled similarly. */
! 2672: /* Note that this doesn't work well for blocking reads: It will hold */
! 2673: /* the allocation lock for the entire duration of the call. Multithreaded */
! 2674: /* clients should really ensure that it won't block, either by setting */
! 2675: /* the descriptor nonblocking, or by calling select or poll first, to */
! 2676: /* make sure that input is available. */
! 2677: /* Another, preferred alternative is to ensure that system calls never */
! 2678: /* write to the protected heap (see above). */
1.1 noro 2679: # if defined(__STDC__) && !defined(SUNOS4)
2680: # include <unistd.h>
1.3 noro 2681: # include <sys/uio.h>
1.1 noro 2682: ssize_t read(int fd, void *buf, size_t nbyte)
2683: # else
2684: # ifndef LINT
2685: int read(fd, buf, nbyte)
2686: # else
2687: int GC_read(fd, buf, nbyte)
2688: # endif
2689: int fd;
2690: char *buf;
2691: int nbyte;
2692: # endif
2693: {
2694: int result;
2695:
2696: GC_begin_syscall();
2697: GC_unprotect_range(buf, (word)nbyte);
1.6 ! noro 2698: # if defined(IRIX5) || defined(GC_LINUX_THREADS)
1.1 noro 2699: /* Indirect system call may not always be easily available. */
2700: /* We could call _read, but that would interfere with the */
2701: /* libpthread interception of read. */
1.3 noro 2702: /* On Linux, we have to be careful with the linuxthreads */
2703: /* read interception. */
1.1 noro 2704: {
2705: struct iovec iov;
2706:
2707: iov.iov_base = buf;
2708: iov.iov_len = nbyte;
2709: result = readv(fd, &iov, 1);
2710: }
2711: # else
1.6 ! noro 2712: # if defined(HURD)
! 2713: result = __read(fd, buf, nbyte);
! 2714: # else
1.4 noro 2715: /* The two zero args at the end of this list are because one
2716: IA-64 syscall() implementation actually requires six args
2717: to be passed, even though they aren't always used. */
2718: result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
1.6 ! noro 2719: # endif /* !HURD */
1.1 noro 2720: # endif
2721: GC_end_syscall();
2722: return(result);
2723: }
1.6 ! noro 2724: #endif /* !MSWIN32 && !MSWINCE && !GC_LINUX_THREADS */
1.3 noro 2725:
1.6 ! noro 2726: #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
1.3 noro 2727: /* We use the GNU ld call wrapping facility. */
2728: /* This requires that the linker be invoked with "--wrap read". */
2729: /* This can be done by passing -Wl,"--wrap read" to gcc. */
2730: /* I'm not sure that this actually wraps whatever version of read */
2731: /* is called by stdio. That code also mentions __read. */
2732: # include <unistd.h>
2733: ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2734: {
2735: int result;
2736:
2737: GC_begin_syscall();
2738: GC_unprotect_range(buf, (word)nbyte);
2739: result = __real_read(fd, buf, nbyte);
2740: GC_end_syscall();
2741: return(result);
2742: }
2743:
2744: /* We should probably also do this for __read, or whatever stdio */
2745: /* actually calls. */
2746: #endif
1.1 noro 2747:
1.6 ! noro 2748: #endif /* 0 */
! 2749:
1.1 noro 2750: /*ARGSUSED*/
2751: GC_bool GC_page_was_ever_dirty(h)
2752: struct hblk *h;
2753: {
2754: return(TRUE);
2755: }
2756:
2757: /* Reset the n pages starting at h to "was never dirty" status. */
2758: /*ARGSUSED*/
2759: void GC_is_fresh(h, n)
2760: struct hblk *h;
2761: word n;
2762: {
2763: }
2764:
1.4 noro 2765: # else /* !MPROTECT_VDB */
2766:
2767: # ifdef GC_USE_LD_WRAP
2768: ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2769: { return __real_read(fd, buf, nbyte); }
2770: # endif
2771:
1.1 noro 2772: # endif /* MPROTECT_VDB */
2773:
2774: # ifdef PROC_VDB
2775:
2776: /*
2777: * See DEFAULT_VDB for interface descriptions.
2778: */
2779:
2780: /*
 * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
2782: * from which we can read page modified bits. This facility is far from
2783: * optimal (e.g. we would like to get the info for only some of the
2784: * address space), but it avoids intercepting system calls.
2785: */
2786:
2787: #include <errno.h>
2788: #include <sys/types.h>
2789: #include <sys/signal.h>
2790: #include <sys/fault.h>
2791: #include <sys/syscall.h>
2792: #include <sys/procfs.h>
2793: #include <sys/stat.h>
2794:
2795: #define INITIAL_BUF_SZ 4096
2796: word GC_proc_buf_size = INITIAL_BUF_SZ;
2797: char *GC_proc_buf;
2798:
1.6 ! noro 2799: #ifdef GC_SOLARIS_THREADS
1.1 noro 2800: /* We don't have exact sp values for threads. So we count on */
2801: /* occasionally declaring stack pages to be fresh. Thus we */
2802: /* need a real implementation of GC_is_fresh. We can't clear */
2803: /* entries in GC_written_pages, since that would declare all */
2804: /* pages with the given hash address to be fresh. */
2805: # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2806: struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
2807: /* Collisions are dropped. */
2808:
2809: # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2810: # define ADD_FRESH_PAGE(h) \
2811: GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2812: # define PAGE_IS_FRESH(h) \
2813: (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
2814: #endif
2815:
2816: /* Add all pages in pht2 to pht1 */
2817: void GC_or_pages(pht1, pht2)
2818: page_hash_table pht1, pht2;
2819: {
2820: register int i;
2821:
2822: for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2823: }
2824:
2825: int GC_proc_fd;
2826:
/* Initialize the PROC_VDB dirty-bit machinery: open the /proc	*/
/* page-data descriptor for this process and allocate the read	*/
/* buffer (plus, with Solaris threads, the fresh-page cache).	*/
void GC_dirty_init()
{
    int fd;
    char buf[30];

    GC_dirty_maintained = TRUE;
    /* Anything allocated before this point has no dirty history;	*/
    /* conservatively mark every page as possibly written.		*/
    if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
    	register int i;

        for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
#	ifdef PRINTSTATS
	    GC_printf1("Allocated words:%lu:all pages may have been written\n",
	    	       (unsigned long)
	    	      		(GC_words_allocd + GC_words_allocd_before_gc));
#	endif
    }
    sprintf(buf, "/proc/%d", getpid());
    fd = open(buf, O_RDONLY);
    if (fd < 0) {
    	ABORT("/proc open failed");
    }
    /* PIOCOPENPD yields a descriptor for reading per-page data.	*/
    /* NOTE(review): issued via syscall() rather than ioctl(),		*/
    /* presumably to bypass any interposed wrappers -- confirm.		*/
    GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
    close(fd);
    if (GC_proc_fd < 0) {
    	ABORT("/proc ioctl failed");
    }
    GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
#   ifdef GC_SOLARIS_THREADS
	/* Direct-mapped cache of pages recently declared fresh.	*/
	GC_fresh_pages = (struct hblk **)
	  GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
	if (GC_fresh_pages == 0) {
	    GC_err_printf0("No space for fresh pages\n");
	    EXIT();
	}
	BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
#   endif
}
2864:
/* Ignore write hints. They don't help us here. */
/* With PROC_VDB the kernel tracks modified pages for us, so there	*/
/* is no page protection to remove before a write.			*/
/*ARGSUSED*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
}
2873:
1.6 ! noro 2874: #ifdef GC_SOLARIS_THREADS
1.1 noro 2875: # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2876: #else
2877: # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
2878: #endif
2879:
/* Read the per-page modified bits from /proc into GC_grungy_pages	*/
/* and fold them into GC_written_pages.  On buffer overflow we retry	*/
/* once with a doubled buffer, then punt by marking everything dirty.	*/
void GC_read_dirty()
{
    unsigned long ps, np;	/* page size / page count of a mapping	*/
    int nmaps;			/* number of address-space mappings	*/
    ptr_t vaddr;
    struct prasmap * map;
    char * bufp;		/* cursor into the /proc data buffer	*/
    ptr_t current_addr, limit;
    int i;
    int dummy;			/* unused				*/

    BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));

    bufp = GC_proc_buf;
    if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
#	ifdef PRINTSTATS
            GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
            	       GC_proc_buf_size);
#	endif
        {
            /* Retry with larger buffer. */
            word new_size = 2 * GC_proc_buf_size;
            char * new_buf = GC_scratch_alloc(new_size);

            if (new_buf != 0) {
                GC_proc_buf = bufp = new_buf;
                GC_proc_buf_size = new_size;
            }
            if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
                WARN("Insufficient space for /proc read\n", 0);
                /* Punt: treat every page as both dirty and ever-dirty.	*/
        	memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
		memset(GC_written_pages, 0xff, sizeof(page_hash_table));
#		ifdef GC_SOLARIS_THREADS
		    BZERO(GC_fresh_pages,
		    	  MAX_FRESH_PAGES * sizeof (struct hblk *));
#		endif
		return;
            }
        }
    }
    /* Copy dirty bits into GC_grungy_pages */
    	nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
	/* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
		     nmaps, PG_REFERENCED, PG_MODIFIED); */
	bufp = bufp + sizeof(struct prpageheader);
	/* The buffer holds nmaps prasmap headers, each followed by one	*/
	/* status byte per page of that mapping.			*/
	for (i = 0; i < nmaps; i++) {
	    map = (struct prasmap *)bufp;
	    vaddr = (ptr_t)(map -> pr_vaddr);
	    ps = map -> pr_pagesize;
	    np = map -> pr_npage;
	    /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
	    limit = vaddr + ps * np;
	    bufp += sizeof (struct prasmap);
	    for (current_addr = vaddr;
	         current_addr < limit; current_addr += ps){
	        if ((*bufp++) & PG_MODIFIED) {
	            register struct hblk * h = (struct hblk *) current_addr;

	            /* Mark every heap block overlapping this page.	*/
	            while ((ptr_t)h < current_addr + ps) {
	                register word index = PHT_HASH(h);

	                set_pht_entry_from_index(GC_grungy_pages, index);
#			ifdef GC_SOLARIS_THREADS
			  {
			    /* A modified page can no longer be fresh.	*/
			    register int slot = FRESH_PAGE_SLOT(h);

			    if (GC_fresh_pages[slot] == h) {
			        GC_fresh_pages[slot] = 0;
			    }
			  }
#			endif
	                h++;
	            }
	        }
	    }
	    /* Round bufp up to the next long-word boundary before the	*/
	    /* next prasmap header.					*/
	    bufp += sizeof(long) - 1;
	    bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
	}
    /* Update GC_written_pages. */
        GC_or_pages(GC_written_pages, GC_grungy_pages);
#   ifdef GC_SOLARIS_THREADS
      /* Make sure that old stacks are considered completely clean	*/
      /* unless written again. */
	GC_old_stacks_are_fresh();
#   endif
}
2967:
2968: #undef READ
2969:
2970: GC_bool GC_page_was_dirty(h)
2971: struct hblk *h;
2972: {
2973: register word index = PHT_HASH(h);
2974: register GC_bool result;
2975:
2976: result = get_pht_entry_from_index(GC_grungy_pages, index);
1.6 ! noro 2977: # ifdef GC_SOLARIS_THREADS
1.1 noro 2978: if (result && PAGE_IS_FRESH(h)) result = FALSE;
2979: /* This happens only if page was declared fresh since */
2980: /* the read_dirty call, e.g. because it's in an unused */
2981: /* thread stack. It's OK to treat it as clean, in */
2982: /* that case. And it's consistent with */
2983: /* GC_page_was_ever_dirty. */
2984: # endif
2985: return(result);
2986: }
2987:
2988: GC_bool GC_page_was_ever_dirty(h)
2989: struct hblk *h;
2990: {
2991: register word index = PHT_HASH(h);
2992: register GC_bool result;
2993:
2994: result = get_pht_entry_from_index(GC_written_pages, index);
1.6 ! noro 2995: # ifdef GC_SOLARIS_THREADS
1.1 noro 2996: if (result && PAGE_IS_FRESH(h)) result = FALSE;
2997: # endif
2998: return(result);
2999: }
3000:
3001: /* Caller holds allocation lock. */
3002: void GC_is_fresh(h, n)
3003: struct hblk *h;
3004: word n;
3005: {
3006:
3007: register word index;
3008:
1.6 ! noro 3009: # ifdef GC_SOLARIS_THREADS
1.1 noro 3010: register word i;
3011:
3012: if (GC_fresh_pages != 0) {
3013: for (i = 0; i < n; i++) {
3014: ADD_FRESH_PAGE(h + i);
3015: }
3016: }
3017: # endif
3018: }
3019:
3020: # endif /* PROC_VDB */
3021:
3022:
3023: # ifdef PCR_VDB
3024:
3025: # include "vd/PCR_VD.h"
3026:
3027: # define NPAGES (32*1024) /* 128 MB */
3028:
3029: PCR_VD_DB GC_grungy_bits[NPAGES];
3030:
3031: ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
3032: /* HBLKSIZE aligned. */
3033:
/* Initialize PCR virtual-dirty tracking over a fixed window of	*/
/* NPAGES blocks starting at the first heap section.		*/
void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
    /* For the time being, we assume the heap generally grows up */
    GC_vd_base = GC_heap_sects[0].hs_start;
    if (GC_vd_base == 0) {
   	ABORT("Bad initial heap segment");
    }
    if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
	!= PCR_ERes_okay) {
	ABORT("dirty bit initialization failed");
    }
}
3047:
/* Snapshot PCR's dirty bits into GC_grungy_bits and clear them,	*/
/* first enabling tracking on any heap sections added since the	*/
/* previous call.							*/
void GC_read_dirty()
{
    /* lazily enable dirty bits on newly added heap sects */
    {
	/* onhs persists across calls: sections [0, onhs) are	*/
	/* already write-protect enabled.			*/
	static int onhs = 0;
	int nhs = GC_n_heap_sects;
	for( ; onhs < nhs; onhs++ ) {
	    PCR_VD_WriteProtectEnable(
		    GC_heap_sects[onhs].hs_start,
		    GC_heap_sects[onhs].hs_bytes );
	}
    }

    if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
	!= PCR_ERes_okay) {
	ABORT("dirty bit read failed");
    }
}
3067:
3068: GC_bool GC_page_was_dirty(h)
3069: struct hblk *h;
3070: {
3071: if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
3072: return(TRUE);
3073: }
3074: return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
3075: }
3076:
/*ARGSUSED*/
/* Prepare the nblocks blocks at h for writing by the collector.	*/
/* NOTE(review): toggling protection off and back on presumably	*/
/* leaves the pages writable-and-tracked in PCR's tables -- confirm	*/
/* against the PCR_VD interface documentation.				*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
    PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
}
3086:
3087: # endif /* PCR_VDB */
3088:
# ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
/* Default answer when no dirty-bit implementation above supplies	*/
/* one: incremental GC write-protects nothing, so clients need no	*/
/* special handling of protected pages.					*/
int GC_incremental_protection_needs()
{
    return GC_PROTECTS_NONE;
}
# endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
! 3095:
1.1 noro 3096: /*
3097: * Call stack save code for debugging.
3098: * Should probably be in mach_dep.c, but that requires reorganization.
3099: */
1.4 noro 3100:
3101: /* I suspect the following works for most X86 *nix variants, so */
3102: /* long as the frame pointer is explicitly stored. In the case of gcc, */
3103: /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
3104: #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
1.6 ! noro 3105: # include <features.h>
! 3106:
1.4 noro 3107: struct frame {
3108: struct frame *fr_savfp;
3109: long fr_savpc;
3110: long fr_arg[NARGS]; /* All the arguments go here. */
3111: };
3112: #endif
3113:
3114: #if defined(SPARC)
3115: # if defined(LINUX)
1.6 ! noro 3116: # include <features.h>
! 3117:
1.4 noro 3118: struct frame {
3119: long fr_local[8];
3120: long fr_arg[6];
3121: struct frame *fr_savfp;
3122: long fr_savpc;
3123: # ifndef __arch64__
3124: char *fr_stret;
3125: # endif
3126: long fr_argd[6];
3127: long fr_argx[0];
3128: };
3129: # else
3130: # if defined(SUNOS4)
3131: # include <machine/frame.h>
3132: # else
3133: # if defined (DRSNX)
3134: # include <sys/sparc/frame.h>
3135: # else
3136: # if defined(OPENBSD) || defined(NETBSD)
3137: # include <frame.h>
3138: # else
3139: # include <sys/frame.h>
3140: # endif
3141: # endif
3142: # endif
3143: # endif
3144: # if NARGS > 6
	--> We only know how to get the first 6 arguments
1.4 noro 3146: # endif
3147: #endif /* SPARC */
1.1 noro 3148:
1.6 ! noro 3149: #ifdef NEED_CALLINFO
1.1 noro 3150: /* Fill in the pc and argument information for up to NFRAMES of my */
3151: /* callers. Ignore my frame and my callers frame. */
3152:
1.6 ! noro 3153: #ifdef LINUX
! 3154: # include <features.h>
! 3155: # if __GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2
! 3156: # define HAVE_BUILTIN_BACKTRACE
! 3157: # ifdef IA64
! 3158: # define BUILTIN_BACKTRACE_BROKEN
! 3159: # endif
! 3160: # endif
! 3161: #endif
! 3162:
! 3163: #include <execinfo.h>
! 3164: #ifdef LINUX
! 3165: # include <unistd.h>
! 3166: #endif
! 3167:
! 3168: #endif /* NEED_CALLINFO */
! 3169:
! 3170: #ifdef SAVE_CALL_CHAIN
! 3171:
! 3172: #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
! 3173: && defined(HAVE_BUILTIN_BACKTRACE)
! 3174:
/* Fill info[] with the return pc of up to NFRAMES callers, using	*/
/* glibc backtrace().  Used only when NARGS == 0, so that a		*/
/* struct callinfo is exactly one pc slot (see the GC_ASSERT and the	*/
/* guard on this #if branch).						*/
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
  void * tmp_info[NFRAMES + 1];
  int npcs, i;
# define IGNORE_FRAMES 1

  /* We retrieve NFRAMES+1 pc values, but discard the first, since it	*/
  /* points to our own frame.						*/
  GC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
  npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
  BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
  /* Zero-terminate the slots backtrace() did not fill.			*/
  for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
}
! 3189:
! 3190: #else /* No builtin backtrace; do it ourselves */
! 3191:
1.4 noro 3192: #if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
1.1 noro 3193: # define FR_SAVFP fr_fp
3194: # define FR_SAVPC fr_pc
3195: #else
3196: # define FR_SAVFP fr_savfp
3197: # define FR_SAVPC fr_savpc
3198: #endif
3199:
1.4 noro 3200: #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
3201: # define BIAS 2047
3202: #else
3203: # define BIAS 0
3204: #endif
3205:
/* Fill info[] with pc (and up to NARGS argument) values for up to	*/
/* NFRAMES callers by walking the saved-frame-pointer chain.  On	*/
/* x86 the chain root is read from %ebp via inline asm; on SPARC the	*/
/* register windows are flushed to the stack first.			*/
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
# ifdef I386
    /* We assume this is turned on only with gcc as the compiler. */
    asm("movl %%ebp,%0" : "=r"(frame));
    fp = frame;
# else
    word GC_save_regs_in_stack();

    frame = (struct frame *) GC_save_regs_in_stack ();
    fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
#endif

   /* Walk toward the stack bottom; stop at NFRAMES, at our own	*/
   /* frame, or when the chain leaves the stack.		*/
   for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
	   && (nframes < NFRAMES));
       fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
      register int i;

      info[nframes].ci_pc = fp->FR_SAVPC;
#     if NARGS > 0
        /* Arguments are stored complemented; GC_print_callers	*/
        /* complements them again before printing.		*/
        for (i = 0; i < NARGS; i++) {
	  info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
	}
#     endif /* NARGS > 0 */
  }
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}
3237:
1.6 ! noro 3238: #endif /* No builtin backtrace */
! 3239:
1.1 noro 3240: #endif /* SAVE_CALL_CHAIN */
1.6 ! noro 3241:
! 3242: #ifdef NEED_CALLINFO
! 3243:
! 3244: /* Print info to stderr. We do NOT hold the allocation lock */
! 3245: void GC_print_callers (info)
! 3246: struct callinfo info[NFRAMES];
! 3247: {
! 3248: register int i;
! 3249: static int reentry_count = 0;
! 3250:
! 3251: LOCK();
! 3252: ++reentry_count;
! 3253: UNLOCK();
! 3254:
! 3255: # if NFRAMES == 1
! 3256: GC_err_printf0("\tCaller at allocation:\n");
! 3257: # else
! 3258: GC_err_printf0("\tCall chain at allocation:\n");
! 3259: # endif
! 3260: for (i = 0; i < NFRAMES; i++) {
! 3261: if (info[i].ci_pc == 0) break;
! 3262: # if NARGS > 0
! 3263: {
! 3264: int j;
! 3265:
! 3266: GC_err_printf0("\t\targs: ");
! 3267: for (j = 0; j < NARGS; j++) {
! 3268: if (j != 0) GC_err_printf0(", ");
! 3269: GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
! 3270: ~(info[i].ci_arg[j]));
! 3271: }
! 3272: GC_err_printf0("\n");
! 3273: }
! 3274: # endif
! 3275: if (reentry_count > 1) {
! 3276: /* We were called during an allocation during */
! 3277: /* a previous GC_print_callers call; punt. */
! 3278: GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
! 3279: continue;
! 3280: }
! 3281: {
! 3282: # ifdef LINUX
! 3283: FILE *pipe;
! 3284: # endif
! 3285: # if defined(HAVE_BUILTIN_BACKTRACE) && \
! 3286: !defined(BUILTIN_BACKTRACE_BROKEN)
! 3287: char **sym_name =
! 3288: backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
! 3289: char *name = sym_name[0];
! 3290: GC_bool found_it = (strchr(name, '(') != 0);
! 3291: # else
! 3292: char buf[40];
! 3293: char *name = buf;
! 3294: GC_bool fount_it = FALSE:
! 3295: sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
! 3296: # endif
! 3297: # ifdef LINUX
! 3298: if (!found_it) {
! 3299: # define EXE_SZ 100
! 3300: static char exe_name[EXE_SZ];
! 3301: # define CMD_SZ 200
! 3302: char cmd_buf[CMD_SZ];
! 3303: # define RESULT_SZ 200
! 3304: static char result_buf[RESULT_SZ];
! 3305: size_t result_len;
! 3306: static GC_bool found_exe_name = FALSE;
! 3307: static GC_bool will_fail = FALSE;
! 3308: int ret_code;
! 3309: /* Unfortunately, this is the common case for the */
! 3310: /* main executable. */
! 3311: /* Try to get it via a hairy and expensive scheme. */
! 3312: /* First we get the name of the executable: */
! 3313: if (will_fail) goto out;
! 3314: if (!found_exe_name) {
! 3315: ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
! 3316: if (ret_code < 0 || ret_code >= EXE_SZ
! 3317: || exe_name[0] != '/') {
! 3318: will_fail = TRUE; /* Dont try again. */
! 3319: goto out;
! 3320: }
! 3321: exe_name[ret_code] = '\0';
! 3322: found_exe_name = TRUE;
! 3323: }
! 3324: /* Then we use popen to start addr2line -e <exe> <addr> */
! 3325: /* There are faster ways to do this, but hopefully this */
! 3326: /* isn't time critical. */
! 3327: sprintf(cmd_buf, "/usr/bin/addr2line -e %s 0x%lx", exe_name,
! 3328: (unsigned long)info[i].ci_pc);
! 3329: pipe = popen(cmd_buf, "r");
! 3330: if (pipe < 0 || fgets(result_buf, RESULT_SZ, pipe) == 0) {
! 3331: will_fail = TRUE;
! 3332: goto out;
! 3333: }
! 3334: result_len = strlen(result_buf);
! 3335: if (result_buf[result_len - 1] == '\n') --result_len;
! 3336: if (result_buf[0] == '?'
! 3337: || result_buf[result_len-2] == ':'
! 3338: && result_buf[result_len-1] == '0')
! 3339: goto out;
! 3340: if (result_len < RESULT_SZ - 25) {
! 3341: /* Add in hex address */
! 3342: sprintf(result_buf + result_len, " [0x%lx]",
! 3343: (unsigned long)info[i].ci_pc);
! 3344: }
! 3345: name = result_buf;
! 3346: pclose(pipe);
! 3347: out:
! 3348: }
! 3349: # endif /* LINUX */
! 3350: GC_err_printf1("\t\t%s\n", name);
! 3351: free(sym_name); /* May call GC_free; that's OK */
! 3352: }
! 3353: }
! 3354: LOCK();
! 3355: --reentry_count;
! 3356: UNLOCK();
! 3357: }
! 3358:
! 3359: #endif /* NEED_CALLINFO */
1.1 noro 3360:
1.4 noro 3361: #if defined(LINUX) && defined(__ELF__) && \
3362: (!defined(SMALL_CONFIG) || defined(USE_PROC_FOR_LIBRARIES))
3363: #ifdef GC_USE_LD_WRAP
3364: # define READ __real_read
3365: #else
3366: # define READ read
3367: #endif
3368:
3369:
/* Repeatedly perform a read call until the buffer is filled or	*/
/* we encounter EOF.  Returns the number of bytes read, or a	*/
/* negative value on error (as from read()).			*/
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    ssize_t total = 0;

    while ((size_t)total < count) {
	ssize_t got = READ(fd, buf + total, count - total);

	if (got < 0) return got;	/* Propagate the error.	*/
	if (got == 0) break;		/* EOF.			*/
	total += got;
    }
    return total;
}
3385: #endif /* LINUX && ... */
3386:
3387:
3388: #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
3389:
/* Dump /proc/self/maps to GC_stderr, to enable looking up names for
   addresses in FIND_LEAK output. */

void GC_print_address_map()
{
    int f;
    int result;
    char maps_temp[32768];	/* NOTE(review): sizeable stack buffer;	*/
				/* assumed acceptable on the calling	*/
				/* thread's stack.			*/
    GC_err_printf0("---------- Begin address map ----------\n");
        f = open("/proc/self/maps", O_RDONLY);
        if (-1 == f) ABORT("Couldn't open /proc/self/maps");
	/* Copy the whole file through in maps_temp-sized chunks; a	*/
	/* short read signals end of file.				*/
	do {
	    result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
	    if (result <= 0) ABORT("Couldn't read /proc/self/maps");
	    GC_err_write(maps_temp, result);
	} while (result == sizeof(maps_temp));

    GC_err_printf0("---------- End address map ----------\n");
}
3409:
3410: #endif
1.1 noro 3411:
3412:
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>