Annotation of OpenXM_contrib2/asir2000/gc/os_dep.c, Revision 1.3
1.1 noro 1: int ox_usr1_sent, ox_int_received, critical_when_signal;
2: static int inside_critical_section;
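/* Asir/OpenXM hooks (see GC_disable_signals()/GC_enable_signals()      */
/* below): ox_usr1_sent and ox_int_received appear to record signals    */
/* that arrive while the collector has signals blocked, and             */
/* critical_when_signal/inside_critical_section track nesting so the    */
/* deferred handlers run only when the outermost critical section is    */
/* left.                                                                */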
3:
4: /*
1.2 noro 5: * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
1.1 noro 6: * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
1.2 noro 7: * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
8: * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
1.1 noro 9: *
10: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
11: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
12: *
13: * Permission is hereby granted to use or copy this program
14: * for any purpose, provided the above notices are retained on all copies.
15: * Permission to modify the code and to distribute modified code is granted,
16: * provided the above notices are retained, and a notice that the code was
17: * modified is included with the above copyright notice.
18: */
19:
20: # include "gc_priv.h"
21:
22: # if defined(LINUX) && !defined(POWERPC)
23: # include <linux/version.h>
24: # if (LINUX_VERSION_CODE <= 0x10400)
25: /* Ugly hack to get struct sigcontext_struct definition. Required */
26: /* for some early 1.3.X releases. Will hopefully go away soon. */
27: /* In some later Linux releases, asm/sigcontext.h may have to */
28: /* be included instead. */
29: # define __KERNEL__
30: # include <asm/signal.h>
31: # undef __KERNEL__
32: # else
33: /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
34: /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
35: /* prototypes, so we have to include the top-level sigcontext.h to */
36: /* make sure the former gets defined to be the latter if appropriate. */
37: # include <features.h>
38: # if 2 <= __GLIBC__
1.2 noro 39: # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
1.1 noro 40: /* glibc 2.1 no longer has sigcontext.h. But signal.h */
41: /* has the right declaration for glibc 2.1. */
42: # include <sigcontext.h>
43: # endif /* 0 == __GLIBC_MINOR__ */
44: # else /* not 2 <= __GLIBC__ */
45: /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
46: /* one. Check LINUX_VERSION_CODE to see which we should reference. */
47: # include <asm/sigcontext.h>
48: # endif /* 2 <= __GLIBC__ */
49: # endif
50: # endif
51: # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
52: # include <sys/types.h>
53: # if !defined(MSWIN32) && !defined(SUNOS4)
54: # include <unistd.h>
55: # endif
56: # endif
57:
58: # include <stdio.h>
59: # include <signal.h>
60:
61: /* Blatantly OS dependent routines, except for those that are related */
1.2 noro 62: /* to dynamic loading. */
1.1 noro 63:
64: # if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
65: # define NEED_FIND_LIMIT
66: # endif
67:
1.2 noro 68: # if defined(IRIX_THREADS) || defined(HPUX_THREADS)
1.1 noro 69: # define NEED_FIND_LIMIT
70: # endif
71:
1.3 ! noro 72: # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
1.1 noro 73: # define NEED_FIND_LIMIT
74: # endif
75:
76: # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
77: # define NEED_FIND_LIMIT
78: # endif
79:
1.2 noro 80: # if defined(LINUX) && \
1.3 ! noro 81: (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64) \
! 82: || defined(MIPS))
1.1 noro 83: # define NEED_FIND_LIMIT
84: # endif
85:
86: #ifdef NEED_FIND_LIMIT
87: # include <setjmp.h>
88: #endif
89:
90: #ifdef FREEBSD
91: # include <machine/trap.h>
92: #endif
93:
94: #ifdef AMIGA
95: # include <proto/exec.h>
96: # include <proto/dos.h>
97: # include <dos/dosextens.h>
98: # include <workbench/startup.h>
99: #endif
100:
101: #ifdef MSWIN32
102: # define WIN32_LEAN_AND_MEAN
103: # define NOSERVICE
104: # include <windows.h>
105: #endif
106:
107: #ifdef MACOS
108: # include <Processes.h>
109: #endif
110:
111: #ifdef IRIX5
112: # include <sys/uio.h>
113: # include <malloc.h> /* for locking */
114: #endif
115: #ifdef USE_MMAP
116: # include <sys/types.h>
117: # include <sys/mman.h>
118: # include <sys/stat.h>
119: # include <fcntl.h>
120: #endif
121:
122: #ifdef SUNOS5SIGS
123: # include <sys/siginfo.h>
124: # undef setjmp
125: # undef longjmp
126: # define setjmp(env) sigsetjmp(env, 1)
127: # define longjmp(env, val) siglongjmp(env, val)
128: # define jmp_buf sigjmp_buf
129: #endif
130:
131: #ifdef DJGPP
132: /* Apparently necessary for djgpp 2.01. May cause problems with */
133: /* other versions. */
134: typedef long unsigned int caddr_t;
135: #endif
136:
137: #ifdef PCR
138: # include "il/PCR_IL.h"
139: # include "th/PCR_ThCtl.h"
140: # include "mm/PCR_MM.h"
141: #endif
142:
143: #if !defined(NO_EXECUTE_PERMISSION)
144: # define OPT_PROT_EXEC PROT_EXEC
145: #else
146: # define OPT_PROT_EXEC 0
147: #endif
148:
1.3 ! noro 149: #if defined(SEARCH_FOR_DATA_START)
! 150: /* The following doesn't work if the GC is in a dynamic library. */
1.1 noro 151: /* The I386 case can be handled without a search. The Alpha case */
152: /* used to be handled differently as well, but the rules changed */
153: /* for recent Linux versions. This seems to be the easiest way to */
154: /* cover all versions. */
155: ptr_t GC_data_start;
156:
157: extern char * GC_copyright[]; /* Any data symbol would do. */
158:
159: void GC_init_linux_data_start()
160: {
161: extern ptr_t GC_find_limit();
162:
163: GC_data_start = GC_find_limit((ptr_t)GC_copyright, FALSE);
164: }
165: #endif
166:
167: # ifdef OS2
168:
169: # include <stddef.h>
170:
171: # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
172:
173: struct exe_hdr {
174: unsigned short magic_number;
175: unsigned short padding[29];
176: long new_exe_offset;
177: };
178:
179: #define E_MAGIC(x) (x).magic_number
180: #define EMAGIC 0x5A4D
181: #define E_LFANEW(x) (x).new_exe_offset
182:
183: struct e32_exe {
184: unsigned char magic_number[2];
185: unsigned char byte_order;
186: unsigned char word_order;
187: unsigned long exe_format_level;
188: unsigned short cpu;
189: unsigned short os;
190: unsigned long padding1[13];
191: unsigned long object_table_offset;
192: unsigned long object_count;
193: unsigned long padding2[31];
194: };
195:
196: #define E32_MAGIC1(x) (x).magic_number[0]
197: #define E32MAGIC1 'L'
198: #define E32_MAGIC2(x) (x).magic_number[1]
199: #define E32MAGIC2 'X'
200: #define E32_BORDER(x) (x).byte_order
201: #define E32LEBO 0
202: #define E32_WORDER(x) (x).word_order
203: #define E32LEWO 0
204: #define E32_CPU(x) (x).cpu
205: #define E32CPU286 1
206: #define E32_OBJTAB(x) (x).object_table_offset
207: #define E32_OBJCNT(x) (x).object_count
208:
209: struct o32_obj {
210: unsigned long size;
211: unsigned long base;
212: unsigned long flags;
213: unsigned long pagemap;
214: unsigned long mapsize;
215: unsigned long reserved;
216: };
217:
218: #define O32_FLAGS(x) (x).flags
219: #define OBJREAD 0x0001L
220: #define OBJWRITE 0x0002L
221: #define OBJINVALID 0x0080L
222: #define O32_SIZE(x) (x).size
223: #define O32_BASE(x) (x).base
224:
225: # else /* IBM's compiler */
226:
227: /* A kludge to get around what appears to be a header file bug */
228: # ifndef WORD
229: # define WORD unsigned short
230: # endif
231: # ifndef DWORD
232: # define DWORD unsigned long
233: # endif
234:
235: # define EXE386 1
236: # include <newexe.h>
237: # include <exe386.h>
238:
239: # endif /* __IBMC__ */
240:
241: # define INCL_DOSEXCEPTIONS
242: # define INCL_DOSPROCESS
243: # define INCL_DOSERRORS
244: # define INCL_DOSMODULEMGR
245: # define INCL_DOSMEMMGR
246: # include <os2.h>
247:
248:
249: /* Disable and enable signals during nontrivial allocations */
250:
251: void GC_disable_signals(void)
252: {
253: ULONG nest;
254:
255: DosEnterMustComplete(&nest);
256: if (nest != 1) ABORT("nested GC_disable_signals");
257: }
258:
259: void GC_enable_signals(void)
260: {
261: ULONG nest;
262:
263: DosExitMustComplete(&nest);
264: if (nest != 0) ABORT("GC_enable_signals");
265: }
266:
267:
268: # else
269:
270: # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
271: && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW)
272:
273: # if defined(sigmask) && !defined(UTS4)
274: /* Use the traditional BSD interface */
275: # define SIGSET_T int
276: # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
277: # define SIG_FILL(set) (set) = 0x7fffffff
278: /* Setting the leading bit appears to provoke a bug in some */
279: /* longjmp implementations. Most systems appear not to have */
280: /* a signal 32. */
281: # define SIGSETMASK(old, new) (old) = sigsetmask(new)
282: # else
283: /* Use POSIX/SYSV interface */
284: # define SIGSET_T sigset_t
285: # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
286: # define SIG_FILL(set) sigfillset(&set)
287: # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
288: # endif
289:
290: static GC_bool mask_initialized = FALSE;
291:
292: static SIGSET_T new_mask;
293:
294: static SIGSET_T old_mask;
295:
296: static SIGSET_T dummy;
297:
298: #if defined(PRINTSTATS) && !defined(THREADS)
299: # define CHECK_SIGNALS
300: int GC_sig_disabled = 0;
301: #endif
302:
303: void GC_disable_signals()
304: {
305: if (!mask_initialized) {
306: SIG_FILL(new_mask);
307:
308: SIG_DEL(new_mask, SIGSEGV);
309: SIG_DEL(new_mask, SIGILL);
310: SIG_DEL(new_mask, SIGQUIT);
311: # ifdef SIGBUS
312: SIG_DEL(new_mask, SIGBUS);
313: # endif
314: # ifdef SIGIOT
315: SIG_DEL(new_mask, SIGIOT);
316: # endif
317: # ifdef SIGEMT
318: SIG_DEL(new_mask, SIGEMT);
319: # endif
320: # ifdef SIGTRAP
321: SIG_DEL(new_mask, SIGTRAP);
322: # endif
323: mask_initialized = TRUE;
324: }
325: # ifdef CHECK_SIGNALS
326: if (GC_sig_disabled != 0) ABORT("Nested disables");
327: GC_sig_disabled++;
328: # endif
329: SIGSETMASK(old_mask,new_mask);
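/* OpenXM addition: remember whether this disable is nested inside an   */
/* already-active critical section, so that only the matching outermost */
/* GC_enable_signals() dispatches deferred handlers.                    */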
330: if ( critical_when_signal )
331: inside_critical_section = 1;
332: else {
333: inside_critical_section = 0;
334: critical_when_signal = 1;
335: }
336: }
337:
338: void GC_enable_signals()
339: {
340: # ifdef CHECK_SIGNALS
341: if (GC_sig_disabled != 1) ABORT("Unmatched enable");
342: GC_sig_disabled--;
343: # endif
344: SIGSETMASK(dummy,old_mask);
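/* OpenXM addition: when the outermost critical section is left, run    */
/* the handlers for any signals that arrived while it was held.         */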
345: if ( !inside_critical_section ) {
346: critical_when_signal = 0;
347: if ( ox_usr1_sent ) {
348: ox_usr1_sent = 0; ox_usr1_handler();
349: }
350: if ( ox_int_received ) {
351: ox_int_received = 0; int_handler();
352: }
353: } else
354: inside_critical_section = 0;
355: }
356:
357: # endif /* !PCR */
358:
359: # endif /*!OS/2 */
360:
361: /* Ivan Demakov: simplest way (to me) */
362: #ifdef DOS4GW
363: void GC_disable_signals() { }
364: void GC_enable_signals() { }
365: #endif
366:
367: /* Find the page size */
368: word GC_page_size;
369:
370: # ifdef MSWIN32
371: void GC_setpagesize()
372: {
373: SYSTEM_INFO sysinfo;
374:
375: GetSystemInfo(&sysinfo);
376: GC_page_size = sysinfo.dwPageSize;
377: }
378:
379: # else
380: # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
381: || defined(USE_MUNMAP)
382: void GC_setpagesize()
383: {
384: GC_page_size = GETPAGESIZE();
385: }
386: # else
387: /* It's acceptable to fake it. */
388: void GC_setpagesize()
389: {
390: GC_page_size = HBLKSIZE;
391: }
392: # endif
393: # endif
394:
395: /*
396: * Find the base of the stack.
397: * Used only in single-threaded environment.
398: * With threads, GC_mark_roots needs to know how to do this.
399: * Called with allocator lock held.
400: */
401: # ifdef MSWIN32
402: # define is_writable(prot) ((prot) == PAGE_READWRITE \
403: || (prot) == PAGE_WRITECOPY \
404: || (prot) == PAGE_EXECUTE_READWRITE \
405: || (prot) == PAGE_EXECUTE_WRITECOPY)
406: /* Return the number of bytes that are writable starting at p. */
407: /* The pointer p is assumed to be page aligned. */
408: /* If base is not 0, *base becomes the beginning of the */
409: /* allocation region containing p. */
410: word GC_get_writable_length(ptr_t p, ptr_t *base)
411: {
412: MEMORY_BASIC_INFORMATION buf;
413: word result;
414: word protect;
415:
416: result = VirtualQuery(p, &buf, sizeof(buf));
417: if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
418: if (base != 0) *base = (ptr_t)(buf.AllocationBase);
419: protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
420: if (!is_writable(protect)) {
421: return(0);
422: }
423: if (buf.State != MEM_COMMIT) return(0);
424: return(buf.RegionSize);
425: }
426:
427: ptr_t GC_get_stack_base()
428: {
429: int dummy;
430: ptr_t sp = (ptr_t)(&dummy);
431: ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
432: word size = GC_get_writable_length(trunc_sp, 0);
433:
434: return(trunc_sp + size);
435: }
436:
437:
438: # else
439:
440: # ifdef OS2
441:
442: ptr_t GC_get_stack_base()
443: {
444: PTIB ptib;
445: PPIB ppib;
446:
447: if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
448: GC_err_printf0("DosGetInfoBlocks failed\n");
449: ABORT("DosGetInfoBlocks failed\n");
450: }
451: return((ptr_t)(ptib -> tib_pstacklimit));
452: }
453:
454: # else
455:
456: # ifdef AMIGA
457:
458: ptr_t GC_get_stack_base()
459: {
460: struct Process *proc = (struct Process*)SysBase->ThisTask;
461:
462: /* Reference: Amiga Guru Book Pages: 42,567,574 */
463: if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS
464: && proc->pr_CLI != NULL) {
465: /* first ULONG is StackSize */
466: /*longPtr = proc->pr_ReturnAddr;
467: size = longPtr[0];*/
468:
469: return (char *)proc->pr_ReturnAddr + sizeof(ULONG);
470: } else {
471: return (char *)proc->pr_Task.tc_SPUpper;
472: }
473: }
474:
475: #if 0 /* old version */
476: ptr_t GC_get_stack_base()
477: {
478: extern struct WBStartup *_WBenchMsg;
479: extern long __base;
480: extern long __stack;
481: struct Task *task;
482: struct Process *proc;
483: struct CommandLineInterface *cli;
484: long size;
485:
486: if ((task = FindTask(0)) == 0) {
487: GC_err_puts("Cannot find own task structure\n");
488: ABORT("task missing");
489: }
490: proc = (struct Process *)task;
491: cli = BADDR(proc->pr_CLI);
492:
493: if (_WBenchMsg != 0 || cli == 0) {
494: size = (char *)task->tc_SPUpper - (char *)task->tc_SPLower;
495: } else {
496: size = cli->cli_DefaultStack * 4;
497: }
498: return (ptr_t)(__base + GC_max(size, __stack));
499: }
500: #endif /* 0 */
501:
502: # else /* !AMIGA, !OS2, ... */
503:
504: # ifdef NEED_FIND_LIMIT
505: /* Some tools to implement HEURISTIC2 */
506: # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
507: /* static */ jmp_buf GC_jmp_buf;
508:
509: /*ARGSUSED*/
510: void GC_fault_handler(sig)
511: int sig;
512: {
513: longjmp(GC_jmp_buf, 1);
514: }
515:
516: # ifdef __STDC__
517: typedef void (*handler)(int);
518: # else
519: typedef void (*handler)();
520: # endif
521:
522: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
523: static struct sigaction old_segv_act;
1.2 noro 524: # if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
1.1 noro 525: static struct sigaction old_bus_act;
526: # endif
527: # else
528: static handler old_segv_handler, old_bus_handler;
529: # endif
530:
531: void GC_setup_temporary_fault_handler()
532: {
533: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
534: struct sigaction act;
535:
536: act.sa_handler = GC_fault_handler;
537: act.sa_flags = SA_RESTART | SA_NODEFER;
538: /* The presence of SA_NODEFER represents yet another gross */
539: /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
540: /* interact correctly with -lthread. We hide the confusion */
541: /* by making sure that signal handling doesn't affect the */
542: /* signal mask. */
543:
544: (void) sigemptyset(&act.sa_mask);
545: # ifdef IRIX_THREADS
546: /* Older versions have a bug related to retrieving */
547: /* and setting a handler at the same time. */
548: (void) sigaction(SIGSEGV, 0, &old_segv_act);
549: (void) sigaction(SIGSEGV, &act, 0);
550: # else
551: (void) sigaction(SIGSEGV, &act, &old_segv_act);
1.2 noro 552: # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
553: || defined(HPUX)
554: /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
555: /* Pthreads doesn't exist under Irix 5.x, so we */
556: /* don't have to worry in the threads case. */
1.1 noro 557: (void) sigaction(SIGBUS, &act, &old_bus_act);
558: # endif
559: # endif /* IRIX_THREADS */
560: # else
561: old_segv_handler = signal(SIGSEGV, GC_fault_handler);
562: # ifdef SIGBUS
563: old_bus_handler = signal(SIGBUS, GC_fault_handler);
564: # endif
565: # endif
566: }
567:
568: void GC_reset_fault_handler()
569: {
570: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
571: (void) sigaction(SIGSEGV, &old_segv_act, 0);
1.2 noro 572: # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
573: || defined(HPUX)
1.1 noro 574: (void) sigaction(SIGBUS, &old_bus_act, 0);
575: # endif
576: # else
577: (void) signal(SIGSEGV, old_segv_handler);
578: # ifdef SIGBUS
579: (void) signal(SIGBUS, old_bus_handler);
580: # endif
581: # endif
582: }
583:
584: /* Return the first nonaddressable location > p (up) or */
585: /* the smallest location q s.t. [q,p] is addressable (!up). */
586: ptr_t GC_find_limit(p, up)
587: ptr_t p;
588: GC_bool up;
589: {
590: static VOLATILE ptr_t result;
591: /* Needs to be static, since otherwise it may not be */
592: /* preserved across the longjmp. Can safely be */
593: /* static since it's only called once, with the */
594: /* allocation lock held. */
595:
596:
597: GC_setup_temporary_fault_handler();
598: if (setjmp(GC_jmp_buf) == 0) {
599: result = (ptr_t)(((word)(p))
600: & ~(MIN_PAGE_SIZE-1));
601: for (;;) {
602: if (up) {
603: result += MIN_PAGE_SIZE;
604: } else {
605: result -= MIN_PAGE_SIZE;
606: }
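/* Touch the candidate page.  If it is unmapped, the access faults  */
/* and GC_fault_handler() longjmps back to the setjmp above.        */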
607: GC_noop1((word)(*result));
608: }
609: }
610: GC_reset_fault_handler();
611: if (!up) {
612: result += MIN_PAGE_SIZE;
613: }
614: return(result);
615: }
616: # endif
617:
1.2 noro 618: #ifdef LINUX_STACKBOTTOM
619:
1.3 ! noro 620: #include <sys/types.h>
! 621: #include <sys/stat.h>
! 622: #include <fcntl.h>
! 623:
1.2 noro 624: # define STAT_SKIP 27 /* Number of fields preceding startstack */
1.3 ! noro 625: /* field in /proc/self/stat */
1.2 noro 626:
627: ptr_t GC_linux_stack_base(void)
628: {
1.3 ! noro 629: /* We read the stack base value from /proc/self/stat. We do this */
! 630: /* using direct I/O system calls in order to avoid calling malloc */
! 631: /* in case REDIRECT_MALLOC is defined. */
! 632: # define STAT_BUF_SIZE 4096
! 633: # ifdef USE_LD_WRAP
! 634: # define STAT_READ __real_read
! 635: # else
! 636: # define STAT_READ read
! 637: # endif
! 638: char stat_buf[STAT_BUF_SIZE];
! 639: int f;
1.2 noro 640: char c;
641: word result = 0;
1.3 ! noro 642: size_t i, buf_offset = 0;
1.2 noro 643:
1.3 ! noro 644: f = open("/proc/self/stat", O_RDONLY);
! 645: if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
! 646: ABORT("Couldn't read /proc/self/stat");
! 647: }
! 648: c = stat_buf[buf_offset++];
1.2 noro 649: /* Skip the required number of fields. This number is hopefully */
650: /* constant across all Linux implementations. */
651: for (i = 0; i < STAT_SKIP; ++i) {
1.3 ! noro 652: while (isspace(c)) c = stat_buf[buf_offset++];
! 653: while (!isspace(c)) c = stat_buf[buf_offset++];
1.2 noro 654: }
1.3 ! noro 655: while (isspace(c)) c = stat_buf[buf_offset++];
1.2 noro 656: while (isdigit(c)) {
657: result *= 10;
658: result += c - '0';
1.3 ! noro 659: c = stat_buf[buf_offset++];
1.2 noro 660: }
1.3 ! noro 661: close(f);
1.2 noro 662: if (result < 0x10000000) ABORT("Absurd stack bottom value");
663: return (ptr_t)result;
664: }
665:
666: #endif /* LINUX_STACKBOTTOM */
1.1 noro 667:
668: ptr_t GC_get_stack_base()
669: {
670: word dummy;
671: ptr_t result;
672:
673: # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
674:
675: # ifdef STACKBOTTOM
676: return(STACKBOTTOM);
677: # else
678: # ifdef HEURISTIC1
679: # ifdef STACK_GROWS_DOWN
680: result = (ptr_t)((((word)(&dummy))
681: + STACKBOTTOM_ALIGNMENT_M1)
682: & ~STACKBOTTOM_ALIGNMENT_M1);
683: # else
684: result = (ptr_t)(((word)(&dummy))
685: & ~STACKBOTTOM_ALIGNMENT_M1);
686: # endif
687: # endif /* HEURISTIC1 */
1.2 noro 688: # ifdef LINUX_STACKBOTTOM
689: result = GC_linux_stack_base();
690: # endif
1.1 noro 691: # ifdef HEURISTIC2
692: # ifdef STACK_GROWS_DOWN
693: result = GC_find_limit((ptr_t)(&dummy), TRUE);
694: # ifdef HEURISTIC2_LIMIT
695: if (result > HEURISTIC2_LIMIT
696: && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
697: result = HEURISTIC2_LIMIT;
698: }
699: # endif
700: # else
701: result = GC_find_limit((ptr_t)(&dummy), FALSE);
702: # ifdef HEURISTIC2_LIMIT
703: if (result < HEURISTIC2_LIMIT
704: && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
705: result = HEURISTIC2_LIMIT;
706: }
707: # endif
708: # endif
709:
710: # endif /* HEURISTIC2 */
711: # ifdef STACK_GROWS_DOWN
712: if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
713: # endif
714: return(result);
715: # endif /* STACKBOTTOM */
716: }
717:
718: # endif /* ! AMIGA */
719: # endif /* ! OS2 */
720: # endif /* ! MSWIN32 */
721:
722: /*
723: * Register static data segment(s) as roots.
724: * If more data segments are added later then they need to be registered
725: * at that point (as we do with SunOS dynamic loading),
726: * or GC_mark_roots needs to check for them (as we do with PCR).
727: * Called with allocator lock held.
728: */
729:
730: # ifdef OS2
731:
732: void GC_register_data_segments()
733: {
734: PTIB ptib;
735: PPIB ppib;
736: HMODULE module_handle;
737: # define PBUFSIZ 512
738: UCHAR path[PBUFSIZ];
739: FILE * myexefile;
740: struct exe_hdr hdrdos; /* MSDOS header. */
741: struct e32_exe hdr386; /* Real header for my executable */
742: struct o32_obj seg; /* Current segment */
743: int nsegs;
744:
745:
746: if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
747: GC_err_printf0("DosGetInfoBlocks failed\n");
748: ABORT("DosGetInfoBlocks failed\n");
749: }
750: module_handle = ppib -> pib_hmte;
751: if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
752: GC_err_printf0("DosQueryModuleName failed\n");
753: ABORT("DosGetInfoBlocks failed\n");
754: }
755: myexefile = fopen(path, "rb");
756: if (myexefile == 0) {
757: GC_err_puts("Couldn't open executable ");
758: GC_err_puts(path); GC_err_puts("\n");
759: ABORT("Failed to open executable\n");
760: }
761: if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
762: GC_err_puts("Couldn't read MSDOS header from ");
763: GC_err_puts(path); GC_err_puts("\n");
764: ABORT("Couldn't read MSDOS header");
765: }
766: if (E_MAGIC(hdrdos) != EMAGIC) {
767: GC_err_puts("Executable has wrong DOS magic number: ");
768: GC_err_puts(path); GC_err_puts("\n");
769: ABORT("Bad DOS magic number");
770: }
771: if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
772: GC_err_puts("Seek to new header failed in ");
773: GC_err_puts(path); GC_err_puts("\n");
774: ABORT("Bad DOS magic number");
775: }
776: if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
777: GC_err_puts("Couldn't read MSDOS header from ");
778: GC_err_puts(path); GC_err_puts("\n");
779: ABORT("Couldn't read OS/2 header");
780: }
781: if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
782: GC_err_puts("Executable has wrong OS/2 magic number:");
783: GC_err_puts(path); GC_err_puts("\n");
784: ABORT("Bad OS/2 magic number");
785: }
786: if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
787: GC_err_puts("Executable %s has wrong byte order: ");
788: GC_err_puts(path); GC_err_puts("\n");
789: ABORT("Bad byte order");
790: }
791: if ( E32_CPU(hdr386) == E32CPU286) {
792: GC_err_puts("GC can't handle 80286 executables: ");
793: GC_err_puts(path); GC_err_puts("\n");
794: EXIT();
795: }
796: if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
797: SEEK_SET) != 0) {
798: GC_err_puts("Seek to object table failed: ");
799: GC_err_puts(path); GC_err_puts("\n");
800: ABORT("Seek to object table failed");
801: }
802: for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
803: int flags;
804: if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
805: GC_err_puts("Couldn't read obj table entry from ");
806: GC_err_puts(path); GC_err_puts("\n");
807: ABORT("Couldn't read obj table entry");
808: }
809: flags = O32_FLAGS(seg);
810: if (!(flags & OBJWRITE)) continue;
811: if (!(flags & OBJREAD)) continue;
812: if (flags & OBJINVALID) {
813: GC_err_printf0("Object with invalid pages?\n");
814: continue;
815: }
816: GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
817: }
818: }
819:
820: # else
821:
822: # ifdef MSWIN32
823: /* Unfortunately, we have to handle win32s very differently from NT, */
824: /* since VirtualQuery has very different semantics. In particular, */
825: /* under win32s a VirtualQuery call on an unmapped page returns an */
826: /* invalid result. Under NT, GC_register_data_segments is a noop and */
827: /* all real work is done by GC_register_dynamic_libraries. Under */
828: /* win32s, we cannot find the data segments associated with dll's. */
829: /* We register the main data segment here. */
830: GC_bool GC_win32s = FALSE; /* We're running under win32s. */
831:
832: GC_bool GC_is_win32s()
833: {
834: DWORD v = GetVersion();
835:
836: /* Check that this is not NT, and Windows major version <= 3 */
837: return ((v & 0x80000000) && (v & 0xff) <= 3);
838: }
839:
840: void GC_init_win32()
841: {
842: GC_win32s = GC_is_win32s();
843: }
844:
845: /* Return the smallest address a such that VirtualQuery */
846: /* returns correct results for all addresses between a and start. */
847: /* Assumes VirtualQuery returns correct information for start. */
848: ptr_t GC_least_described_address(ptr_t start)
849: {
850: MEMORY_BASIC_INFORMATION buf;
851: SYSTEM_INFO sysinfo;
852: DWORD result;
853: LPVOID limit;
854: ptr_t p;
855: LPVOID q;
856:
857: GetSystemInfo(&sysinfo);
858: limit = sysinfo.lpMinimumApplicationAddress;
859: p = (ptr_t)((word)start & ~(GC_page_size - 1));
860: for (;;) {
861: q = (LPVOID)(p - GC_page_size);
862: if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
863: result = VirtualQuery(q, &buf, sizeof(buf));
864: if (result != sizeof(buf) || buf.AllocationBase == 0) break;
865: p = (ptr_t)(buf.AllocationBase);
866: }
867: return(p);
868: }
869:
870: /* Is p the start of either the malloc heap, or of one of our */
871: /* heap sections? */
872: GC_bool GC_is_heap_base (ptr_t p)
873: {
874:
875: register unsigned i;
876:
877: # ifndef REDIRECT_MALLOC
878: static ptr_t malloc_heap_pointer = 0;
879:
880: if (0 == malloc_heap_pointer) {
881: MEMORY_BASIC_INFORMATION buf;
882: register DWORD result = VirtualQuery(malloc(1), &buf, sizeof(buf));
883:
884: if (result != sizeof(buf)) {
885: ABORT("Weird VirtualQuery result");
886: }
887: malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
888: }
889: if (p == malloc_heap_pointer) return(TRUE);
890: # endif
891: for (i = 0; i < GC_n_heap_bases; i++) {
892: if (GC_heap_bases[i] == p) return(TRUE);
893: }
894: return(FALSE);
895: }
896:
897: void GC_register_root_section(ptr_t static_root)
898: {
899: MEMORY_BASIC_INFORMATION buf;
900: SYSTEM_INFO sysinfo;
901: DWORD result;
902: DWORD protect;
903: LPVOID p;
904: char * base;
905: char * limit, * new_limit;
906:
907: if (!GC_win32s) return;
908: p = base = limit = GC_least_described_address(static_root);
909: GetSystemInfo(&sysinfo);
910: while (p < sysinfo.lpMaximumApplicationAddress) {
911: result = VirtualQuery(p, &buf, sizeof(buf));
912: if (result != sizeof(buf) || buf.AllocationBase == 0
913: || GC_is_heap_base(buf.AllocationBase)) break;
914: new_limit = (char *)p + buf.RegionSize;
915: protect = buf.Protect;
916: if (buf.State == MEM_COMMIT
917: && is_writable(protect)) {
918: if ((char *)p == limit) {
919: limit = new_limit;
920: } else {
921: if (base != limit) GC_add_roots_inner(base, limit, FALSE);
922: base = p;
923: limit = new_limit;
924: }
925: }
926: if (p > (LPVOID)new_limit /* overflow */) break;
927: p = (LPVOID)new_limit;
928: }
929: if (base != limit) GC_add_roots_inner(base, limit, FALSE);
930: }
931:
932: void GC_register_data_segments()
933: {
934: static char dummy;
935:
936: GC_register_root_section((ptr_t)(&dummy));
937: }
938: # else
939: # ifdef AMIGA
940:
941: void GC_register_data_segments()
942: {
943: struct Process *proc;
944: struct CommandLineInterface *cli;
945: BPTR myseglist;
946: ULONG *data;
947:
948: int num;
949:
950:
951: # ifdef __GNUC__
952: ULONG dataSegSize;
953: GC_bool found_segment = FALSE;
954: extern char __data_size[];
955:
956: dataSegSize=__data_size+8;
957: /* Can't find the location of __data_size, because
958: it's possible that it is inside the segment. */
959:
960: # endif
961:
962: proc= (struct Process*)SysBase->ThisTask;
963:
964: /* Reference: Amiga Guru Book Pages: 538ff,565,573
965: and XOper.asm */
966: if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS) {
967: if (proc->pr_CLI == NULL) {
968: myseglist = proc->pr_SegList;
969: } else {
970: /* ProcLoaded 'Loaded as a command: '*/
971: cli = BADDR(proc->pr_CLI);
972: myseglist = cli->cli_Module;
973: }
974: } else {
975: ABORT("Not a Process.");
976: }
977:
978: if (myseglist == NULL) {
979: ABORT("Arrrgh.. can't find segments, aborting");
980: }
981:
982: /* xoper hunks Shell Process */
983:
984: num=0;
985: for (data = (ULONG *)BADDR(myseglist); data != NULL;
986: data = (ULONG *)BADDR(data[0])) {
987: if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
988: ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
989: # ifdef __GNUC__
990: if (dataSegSize == data[-1]) {
991: found_segment = TRUE;
992: }
993: # endif
994: GC_add_roots_inner((char *)&data[1],
995: ((char *)&data[1]) + data[-1], FALSE);
996: }
997: ++num;
998: } /* for */
999: # ifdef __GNUC__
1000: if (!found_segment) {
1001: ABORT("Can`t find correct Segments.\nSolution: Use an newer version of ixemul.library");
1002: }
1003: # endif
1004: }
1005:
1006: #if 0 /* old version */
1007: void GC_register_data_segments()
1008: {
1009: extern struct WBStartup *_WBenchMsg;
1010: struct Process *proc;
1011: struct CommandLineInterface *cli;
1012: BPTR myseglist;
1013: ULONG *data;
1014:
1015: if ( _WBenchMsg != 0 ) {
1016: if ((myseglist = _WBenchMsg->sm_Segment) == 0) {
1017: GC_err_puts("No seglist from workbench\n");
1018: return;
1019: }
1020: } else {
1021: if ((proc = (struct Process *)FindTask(0)) == 0) {
1022: GC_err_puts("Cannot find process structure\n");
1023: return;
1024: }
1025: if ((cli = BADDR(proc->pr_CLI)) == 0) {
1026: GC_err_puts("No CLI\n");
1027: return;
1028: }
1029: if ((myseglist = cli->cli_Module) == 0) {
1030: GC_err_puts("No seglist from CLI\n");
1031: return;
1032: }
1033: }
1034:
1035: for (data = (ULONG *)BADDR(myseglist); data != 0;
1036: data = (ULONG *)BADDR(data[0])) {
1037: # ifdef AMIGA_SKIP_SEG
1038: if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
1039: ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
1040: # else
1041: {
1042: # endif /* AMIGA_SKIP_SEG */
1043: GC_add_roots_inner((char *)&data[1],
1044: ((char *)&data[1]) + data[-1], FALSE);
1045: }
1046: }
1047: }
1048: #endif /* old version */
1049:
1050:
1051: # else
1052:
1053: # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
1054: char * GC_SysVGetDataStart(max_page_size, etext_addr)
1055: int max_page_size;
1056: int * etext_addr;
1057: {
1058: word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1059: & ~(sizeof(word) - 1);
1060: /* etext rounded to word boundary */
1061: word next_page = ((text_end + (word)max_page_size - 1)
1062: & ~((word)max_page_size - 1));
1063: word page_offset = (text_end & ((word)max_page_size - 1));
1064: VOLATILE char * result = (char *)(next_page + page_offset);
1065: /* Note that this isn't equivalent to just adding */
1066: /* max_page_size to &etext if &etext is at a page boundary */
1067:
1068: GC_setup_temporary_fault_handler();
1069: if (setjmp(GC_jmp_buf) == 0) {
1070: /* Try writing to the address. */
1071: *result = *result;
1072: GC_reset_fault_handler();
1073: } else {
1074: GC_reset_fault_handler();
1075: /* We got here via a longjmp. The address is not readable. */
1076: /* This is known to happen under Solaris 2.4 + gcc, which place */
1077: /* string constants in the text segment, but after etext. */
1078: /* Use plan B. Note that we now know there is a gap between */
1079: /* text and data segments, so plan A bought us something. */
1080: result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
1081: }
1082: return((char *)result);
1083: }
1084: # endif
1085:
1086:
1087: void GC_register_data_segments()
1088: {
1089: # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1090: && !defined(MACOSX)
1091: # if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
1092: /* As of Solaris 2.3, the Solaris threads implementation */
1093: /* allocates the data structure for the initial thread with */
1094: /* sbrk at process startup. It needs to be scanned, so that */
1095: /* we don't lose some malloc allocated data structures */
1096: /* hanging from it. We're on thin ice here ... */
1097: extern caddr_t sbrk();
1098:
1099: GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1100: # else
1101: GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
1102: # endif
1103: # endif
1104: # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1105: GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
1106: # endif
1107: # if defined(MACOS)
1108: {
1109: # if defined(THINK_C)
1110: extern void* GC_MacGetDataStart(void);
1111: /* globals begin above stack and end at a5. */
1112: GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1113: (ptr_t)LMGetCurrentA5(), FALSE);
1114: # else
1115: # if defined(__MWERKS__)
1116: # if !__POWERPC__
1117: extern void* GC_MacGetDataStart(void);
1118: /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1119: # if __option(far_data)
1120: extern void* GC_MacGetDataEnd(void);
1121: # endif
1122: /* globals begin above stack and end at a5. */
1123: GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1124: (ptr_t)LMGetCurrentA5(), FALSE);
1125: /* MATTHEW: Handle Far Globals */
1126: # if __option(far_data)
1127: /* Far globals follow the QD globals: */
1128: GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1129: (ptr_t)GC_MacGetDataEnd(), FALSE);
1130: # endif
1131: # else
1132: extern char __data_start__[], __data_end__[];
1133: GC_add_roots_inner((ptr_t)&__data_start__,
1134: (ptr_t)&__data_end__, FALSE);
1135: # endif /* __POWERPC__ */
1136: # endif /* __MWERKS__ */
1137: # endif /* !THINK_C */
1138: }
1139: # endif /* MACOS */
1140:
1141: /* Dynamic libraries are added at every collection, since they may */
1142: /* change. */
1143: }
1144:
1145: # endif /* ! AMIGA */
1146: # endif /* ! MSWIN32 */
1147: # endif /* ! OS2 */
1148:
1149: /*
1150: * Auxiliary routines for obtaining memory from OS.
1151: */
1152:
1153: # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1154: && !defined(MSWIN32) && !defined(MACOS) && !defined(DOS4GW)
1155:
1156: # ifdef SUNOS4
1157: extern caddr_t sbrk();
1158: # endif
1159: # ifdef __STDC__
1160: # define SBRK_ARG_T ptrdiff_t
1161: # else
1162: # define SBRK_ARG_T int
1163: # endif
1164:
1165: # ifdef RS6000
1166: /* The compiler seems to generate speculative reads one past the end of */
1167: /* an allocated object. Hence we need to make sure that the page */
1168: /* following the last heap page is also mapped. */
1169: ptr_t GC_unix_get_mem(bytes)
1170: word bytes;
1171: {
1172: caddr_t cur_brk = (caddr_t)sbrk(0);
1173: caddr_t result;
1174: SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1175: static caddr_t my_brk_val = 0;
1176:
1177: if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1178: if (lsbs != 0) {
1179: if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1180: }
1181: if (cur_brk == my_brk_val) {
1182: /* Use the extra block we allocated last time. */
1183: result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1184: if (result == (caddr_t)(-1)) return(0);
1185: result -= GC_page_size;
1186: } else {
1187: result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1188: if (result == (caddr_t)(-1)) return(0);
1189: }
1190: my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1191: return((ptr_t)result);
1192: }
1193:
1194: #else /* Not RS6000 */
1195:
1196: #if defined(USE_MMAP)
1197: /* Tested only under IRIX5 and Solaris 2 */
1198:
1199: #ifdef USE_MMAP_FIXED
1200: # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1201: /* Seems to yield better performance on Solaris 2, but can */
1202: /* be unreliable if something is already mapped at the address. */
1203: #else
1204: # define GC_MMAP_FLAGS MAP_PRIVATE
1205: #endif
1206:
1207: ptr_t GC_unix_get_mem(bytes)
1208: word bytes;
1209: {
1210: static GC_bool initialized = FALSE;
1211: static int fd;
1212: void *result;
1213: static ptr_t last_addr = HEAP_START;
1214:
1215: if (!initialized) {
1216: fd = open("/dev/zero", O_RDONLY);
1217: initialized = TRUE;
1218: }
1219: if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1220: result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1221: GC_MMAP_FLAGS, fd, 0/* offset */);
1222: if (result == MAP_FAILED) return(0);
1223: last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1224: last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1225: return((ptr_t)result);
1226: }
1227:
1228: #else /* Not RS6000, not USE_MMAP */
1229: ptr_t GC_unix_get_mem(bytes)
1230: word bytes;
1231: {
1232: ptr_t result;
1233: # ifdef IRIX5
1234: /* Bare sbrk isn't thread safe. Play by malloc rules. */
1235: /* The equivalent may be needed on other systems as well. */
1236: __LOCK_MALLOC();
1237: # endif
1238: {
1239: ptr_t cur_brk = (ptr_t)sbrk(0);
1240: SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1241:
1242: if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1243: if (lsbs != 0) {
1244: if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1245: }
1246: result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1247: if (result == (ptr_t)(-1)) result = 0;
1248: }
1249: # ifdef IRIX5
1250: __UNLOCK_MALLOC();
1251: # endif
1252: return(result);
1253: }
1254:
1255: #endif /* Not USE_MMAP */
1256: #endif /* Not RS6000 */
1257:
1258: # endif /* UN*X */
1259:
1260: # ifdef OS2
1261:
1262: void * os2_alloc(size_t bytes)
1263: {
1264: void * result;
1265:
1266: if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1267: PAG_WRITE | PAG_COMMIT)
1268: != NO_ERROR) {
1269: return(0);
1270: }
1271: if (result == 0) return(os2_alloc(bytes));
1272: return(result);
1273: }
1274:
1275: # endif /* OS2 */
1276:
1277:
1278: # ifdef MSWIN32
1279: word GC_n_heap_bases = 0;
1280:
1281: ptr_t GC_win32_get_mem(bytes)
1282: word bytes;
1283: {
1284: ptr_t result;
1285:
1286: if (GC_win32s) {
1287: /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1288: /* There are also unconfirmed rumors of other */
1289: /* problems, so we dodge the issue. */
1290: result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
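/* Round up to the next HBLKSIZE boundary; the extra HBLKSIZE bytes */
/* requested above leave room for this adjustment.                  */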
1291: result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1292: } else {
1293: result = (ptr_t) VirtualAlloc(NULL, bytes,
1294: MEM_COMMIT | MEM_RESERVE,
1295: PAGE_EXECUTE_READWRITE);
1296: }
1297: if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1298: /* If I read the documentation correctly, this can */
1299: /* only happen if HBLKSIZE > 64k or not a power of 2. */
1300: if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1301: GC_heap_bases[GC_n_heap_bases++] = result;
1302: return(result);
1303: }
1304:
1305: void GC_win32_free_heap ()
1306: {
1307: if (GC_win32s) {
1308: while (GC_n_heap_bases > 0) {
1309: GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1310: GC_heap_bases[GC_n_heap_bases] = 0;
1311: }
1312: }
1313: }
1314:
1315:
1316: # endif
1317:
1318: #ifdef USE_MUNMAP
1319:
1320: /* For now, this only works on some Unix-like systems. If you */
1321: /* have something else, don't define USE_MUNMAP. */
1322: /* We assume ANSI C to support this feature. */
1323: #include <unistd.h>
1324: #include <sys/mman.h>
1325: #include <sys/stat.h>
1326: #include <sys/types.h>
1327: #include <fcntl.h>
1328:
1329: /* Compute a page aligned starting address for the unmap */
1330: /* operation on a block of size bytes starting at start. */
1331: /* Return 0 if the block is too small to make this feasible. */
1332: ptr_t GC_unmap_start(ptr_t start, word bytes)
1333: {
1334: ptr_t result = start;
1335: /* Round start to next page boundary. */
1336: result += GC_page_size - 1;
1337: result = (ptr_t)((word)result & ~(GC_page_size - 1));
1338: if (result + GC_page_size > start + bytes) return 0;
1339: return result;
1340: }
1341:
1342: /* Compute end address for an unmap operation on the indicated */
1343: /* block. */
1344: ptr_t GC_unmap_end(ptr_t start, word bytes)
1345: {
1346: ptr_t end_addr = start + bytes;
1347: end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1348: return end_addr;
1349: }
1350:
1351: /* We assume that GC_remap is called on exactly the same range */
1352: /* as a previous call to GC_unmap. It is safe to consistently */
1353: /* round the endpoints in both places. */
1354: void GC_unmap(ptr_t start, word bytes)
1355: {
1356: ptr_t start_addr = GC_unmap_start(start, bytes);
1357: ptr_t end_addr = GC_unmap_end(start, bytes);
1358: word len = end_addr - start_addr;
1359: if (0 == start_addr) return;
1360: if (munmap(start_addr, len) != 0) ABORT("munmap failed");
1361: GC_unmapped_bytes += len;
1362: }
1363:
1364:
1365: void GC_remap(ptr_t start, word bytes)
1366: {
1367: static int zero_descr = -1;
1368: ptr_t start_addr = GC_unmap_start(start, bytes);
1369: ptr_t end_addr = GC_unmap_end(start, bytes);
1370: word len = end_addr - start_addr;
1371: ptr_t result;
1372:
1373: if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
1374: if (0 == start_addr) return;
1375: result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1376: MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
1377: if (result != start_addr) {
1378: ABORT("mmap remapping failed");
1379: }
1380: GC_unmapped_bytes -= len;
1381: }
1382:
1383: /* Two adjacent blocks have already been unmapped and are about to */
1384: /* be merged. Unmap the whole block. This typically requires */
1385: /* that we unmap a small section in the middle that was not previously */
1386: /* unmapped due to alignment constraints. */
1387: void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1388: {
1389: ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1390: ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1391: ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1392: ptr_t end2_addr = GC_unmap_end(start2, bytes2);
1393: ptr_t start_addr = end1_addr;
1394: ptr_t end_addr = start2_addr;
1395: word len;
1396: GC_ASSERT(start1 + bytes1 == start2);
1397: if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1398: if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1399: if (0 == start_addr) return;
1400: len = end_addr - start_addr;
1401: if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1402: GC_unmapped_bytes += len;
1403: }
1404:
1405: #endif /* USE_MUNMAP */
1406:
1407: /* Routine for pushing any additional roots. In THREADS */
1408: /* environment, this is also responsible for marking from */
1409: /* thread stacks. In the SRC_M3 case, it also handles */
1410: /* global variables. */
1411: #ifndef THREADS
1412: void (*GC_push_other_roots)() = 0;
1413: #else /* THREADS */
1414:
1415: # ifdef PCR
1416: PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1417: {
1418: struct PCR_ThCtl_TInfoRep info;
1419: PCR_ERes result;
1420:
1421: info.ti_stkLow = info.ti_stkHi = 0;
1422: result = PCR_ThCtl_GetInfo(t, &info);
1423: GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1424: return(result);
1425: }
1426:
1427: /* Push the contents of an old object. We treat this as stack */
1428: /* data only because that makes it robust against mark stack */
1429: /* overflow. */
1430: PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1431: {
1432: GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1433: return(PCR_ERes_okay);
1434: }
1435:
1436:
1437: void GC_default_push_other_roots()
1438: {
1439: /* Traverse data allocated by previous memory managers. */
1440: {
1441: extern struct PCR_MM_ProcsRep * GC_old_allocator;
1442:
1443: if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1444: GC_push_old_obj, 0)
1445: != PCR_ERes_okay) {
1446: ABORT("Old object enumeration failed");
1447: }
1448: }
1449: /* Traverse all thread stacks. */
1450: if (PCR_ERes_IsErr(
1451: PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1452: || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1453: ABORT("Thread stack marking failed\n");
1454: }
1455: }
1456:
1457: # endif /* PCR */
1458:
1459: # ifdef SRC_M3
1460:
1461: # ifdef ALL_INTERIOR_POINTERS
1462: --> misconfigured
1463: # endif
1464:
1465:
1466: extern void ThreadF__ProcessStacks();
1467:
1468: void GC_push_thread_stack(start, stop)
1469: word start, stop;
1470: {
1471: GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
1472: }
1473:
1474: /* Push routine with M3 specific calling convention. */
1475: GC_m3_push_root(dummy1, p, dummy2, dummy3)
1476: word *p;
1477: ptr_t dummy1, dummy2;
1478: int dummy3;
1479: {
1480: word q = *p;
1481:
1482: if ((ptr_t)(q) >= GC_least_plausible_heap_addr
1483: && (ptr_t)(q) < GC_greatest_plausible_heap_addr) {
1484: GC_push_one_checked(q,FALSE);
1485: }
1486: }
1487:
1488: /* M3 set equivalent to RTHeap.TracedRefTypes */
1489: typedef struct { int elts[1]; } RefTypeSet;
1490: RefTypeSet GC_TracedRefTypes = {{0x1}};
1491:
1492: /* From finalize.c */
1493: extern void GC_push_finalizer_structures();
1494:
1495: /* From stubborn.c: */
1496: # ifdef STUBBORN_ALLOC
1497: extern GC_PTR * GC_changing_list_start;
1498: # endif
1499:
1500:
1501: void GC_default_push_other_roots()
1502: {
1503: /* Use the M3 provided routine for finding static roots. */
1504: /* This is a bit dubious, since it presumes no C roots. */
1505: /* We handle the collector roots explicitly. */
1506: {
1507: # ifdef STUBBORN_ALLOC
1508: GC_push_one(GC_changing_list_start);
1509: # endif
1510: GC_push_finalizer_structures();
1511: RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
1512: }
1513: if (GC_words_allocd > 0) {
1514: ThreadF__ProcessStacks(GC_push_thread_stack);
1515: }
1516: /* Otherwise this isn't absolutely necessary, and we have */
1517: /* startup ordering problems. */
1518: }
1519:
1520: # endif /* SRC_M3 */
1521:
1522: # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
1523: || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
1.2 noro 1524: || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
1.1 noro 1525:
1526: extern void GC_push_all_stacks();
1527:
1528: void GC_default_push_other_roots()
1529: {
1530: GC_push_all_stacks();
1531: }
1532:
1533: # endif /* SOLARIS_THREADS || ... */
1534:
1535: void (*GC_push_other_roots)() = GC_default_push_other_roots;
1536:
1537: #endif
1538:
1539: /*
1540: * Routines for accessing dirty bits on virtual pages.
1541: * We plan to eventually implement four strategies for doing so:
1542: * DEFAULT_VDB: A simple dummy implementation that treats every page
1543: * as possibly dirty. This makes incremental collection
1544: * useless, but the implementation is still correct.
1545: * PCR_VDB: Use PPCR's virtual dirty bit facility.
1546: * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1547: * works under some SVR4 variants. Even then, it may be
1548: * too slow to be entirely satisfactory. Requires reading
1549: * dirty bits for entire address space. Implementations tend
1550: * to assume that the client is a (slow) debugger.
1551: * MPROTECT_VDB: Protect pages and then catch the faults to keep track of
1552: * dirtied pages. The implementation (and implementability)
1553: * is highly system dependent. This usually fails when system
1554: * calls write to a protected page. We prevent the read system
1555: * call from doing so. It is the client's responsibility to
1556: * make sure that other system calls are similarly protected
1557: * or write only to the stack.
1558: */
1559:
1560: GC_bool GC_dirty_maintained = FALSE;
1561:
1562: # ifdef DEFAULT_VDB
1563:
1564: /* All of the following assume the allocation lock is held, and */
1565: /* signals are disabled. */
1566:
1567: /* The client asserts that unallocated pages in the heap are never */
1568: /* written. */
1569:
1570: /* Initialize virtual dirty bit implementation. */
1571: void GC_dirty_init()
1572: {
1573: GC_dirty_maintained = TRUE;
1574: }
1575:
1576: /* Retrieve system dirty bits for heap to a local buffer. */
1577: /* Restore the system's notion of which pages are dirty. */
1578: void GC_read_dirty()
1579: {}
1580:
1581: /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1582: /* If the actual page size is different, this returns TRUE if any */
1583: /* of the pages overlapping h are dirty. This routine may err on the */
1584: /* side of labelling pages as dirty (and this implementation does). */
1585: /*ARGSUSED*/
1586: GC_bool GC_page_was_dirty(h)
1587: struct hblk *h;
1588: {
1589: return(TRUE);
1590: }
1591:
1592: /*
1593: * The following two routines are typically less crucial. They matter
1594: * most with large dynamic libraries, or if we can't accurately identify
1595: * stacks, e.g. under Solaris 2.X. Otherwise the following default
1596: * versions are adequate.
1597: */
1598:
1599: /* Could any valid GC heap pointer ever have been written to this page? */
1600: /*ARGSUSED*/
1601: GC_bool GC_page_was_ever_dirty(h)
1602: struct hblk *h;
1603: {
1604: return(TRUE);
1605: }
1606:
1607: /* Reset the n pages starting at h to "was never dirty" status. */
1608: void GC_is_fresh(h, n)
1609: struct hblk *h;
1610: word n;
1611: {
1612: }
1613:
1614: /* A call hints that h is about to be written. */
1615: /* May speed up some dirty bit implementations. */
1616: /*ARGSUSED*/
1617: void GC_write_hint(h)
1618: struct hblk *h;
1619: {
1620: }
1621:
1622: # endif /* DEFAULT_VDB */
1623:
1624:
1625: # ifdef MPROTECT_VDB
1626:
1627: /*
1628: * See DEFAULT_VDB for interface descriptions.
1629: */
1630:
1631: /*
1632: * This implementation maintains dirty bits itself by catching write
1633: * faults and keeping track of them. We assume nobody else catches
1634: * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1635: * except as a result of a read system call. This means clients must
1636: * either ensure that system calls do not touch the heap, or must
1637: * provide their own wrappers analogous to the one for read.
1638: * We assume the page size is a multiple of HBLKSIZE.
1639: * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1640: * tried to use portable code where easily possible. It is known
1641: * not to work under a number of other systems.
1642: */
1643:
1644: # ifndef MSWIN32
1645:
1646: # include <sys/mman.h>
1647: # include <signal.h>
1648: # include <sys/syscall.h>
1649:
1650: # define PROTECT(addr, len) \
1.2 noro 1651: if (mprotect((caddr_t)(addr), (size_t)(len), \
1.1 noro 1652: PROT_READ | OPT_PROT_EXEC) < 0) { \
1653: ABORT("mprotect failed"); \
1654: }
1655: # define UNPROTECT(addr, len) \
1.2 noro 1656: if (mprotect((caddr_t)(addr), (size_t)(len), \
1.1 noro 1657: PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1658: ABORT("un-mprotect failed"); \
1659: }
1660:
1661: # else
1662:
1663: # include <signal.h>
1664:
1665: static DWORD protect_junk;
1666: # define PROTECT(addr, len) \
1667: if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1668: &protect_junk)) { \
1669: DWORD last_error = GetLastError(); \
1670: GC_printf1("Last error code: %lx\n", last_error); \
1671: ABORT("VirtualProtect failed"); \
1672: }
1673: # define UNPROTECT(addr, len) \
1674: if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1675: &protect_junk)) { \
1676: ABORT("un-VirtualProtect failed"); \
1677: }
1678:
1679: # endif
1680:
1681: #if defined(SUNOS4) || defined(FREEBSD)
1682: typedef void (* SIG_PF)();
1683: #endif
1684: #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX)
1.2 noro 1685: # ifdef __STDC__
1.1 noro 1686: typedef void (* SIG_PF)(int);
1.2 noro 1687: # else
1688: typedef void (* SIG_PF)();
1689: # endif
1.1 noro 1690: #endif
1691: #if defined(MSWIN32)
1692: typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
1693: # undef SIG_DFL
1694: # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1695: #endif
1696:
1697: #if defined(IRIX5) || defined(OSF1)
1698: typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1699: #endif
1700: #if defined(SUNOS5SIGS)
1.2 noro 1701: # ifdef HPUX
1702: # define SIGINFO __siginfo
1703: # else
1704: # define SIGINFO siginfo
1705: # endif
1706: # ifdef __STDC__
1707: typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
1708: # else
1709: typedef void (* REAL_SIG_PF)();
1710: # endif
1.1 noro 1711: #endif
1712: #if defined(LINUX)
1713: # include <linux/version.h>
1.2 noro 1714: # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1.1 noro 1715: typedef struct sigcontext s_c;
1716: # else
1717: typedef struct sigcontext_struct s_c;
1718: # endif
1.2 noro 1719: # if defined(ALPHA) || defined(M68K)
1720: typedef void (* REAL_SIG_PF)(int, int, s_c *);
1721: # else
1722: # if defined(IA64)
1723: typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
1724: # else
1725: typedef void (* REAL_SIG_PF)(int, s_c);
1726: # endif
1727: # endif
1.1 noro 1728: # ifdef ALPHA
1729: /* Retrieve fault address from sigcontext structure by decoding */
1730: /* instruction. */
1731: char * get_fault_addr(s_c *sc) {
1732: unsigned instr;
1733: word faultaddr;
1734:
1735: instr = *((unsigned *)(sc->sc_pc));
1736: faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
1737: faultaddr += (word) (((int)instr << 16) >> 16);
1738: return (char *)faultaddr;
1739: }
1740: # endif /* ALPHA */
1741: # endif
1742:
1743: SIG_PF GC_old_bus_handler;
1744: SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
1745:
1746: /*ARGSUSED*/
1747: # if defined (SUNOS4) || defined(FREEBSD)
1748: void GC_write_fault_handler(sig, code, scp, addr)
1749: int sig, code;
1750: struct sigcontext *scp;
1751: char * addr;
1752: # ifdef SUNOS4
1753: # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1754: # define CODE_OK (FC_CODE(code) == FC_PROT \
1755: || (FC_CODE(code) == FC_OBJERR \
1756: && FC_ERRNO(code) == FC_PROT))
1757: # endif
1758: # ifdef FREEBSD
1759: # define SIG_OK (sig == SIGBUS)
1760: # define CODE_OK (code == BUS_PAGE_FAULT)
1761: # endif
1762: # endif
1763: # if defined(IRIX5) || defined(OSF1)
1764: # include <errno.h>
1765: void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
1766: # define SIG_OK (sig == SIGSEGV)
1767: # ifdef OSF1
1768: # define CODE_OK (code == 2 /* experimentally determined */)
1769: # endif
1770: # ifdef IRIX5
1771: # define CODE_OK (code == EACCES)
1772: # endif
1773: # endif
1774: # if defined(LINUX)
1.2 noro 1775: # if defined(ALPHA) || defined(M68K)
1.1 noro 1776: void GC_write_fault_handler(int sig, int code, s_c * sc)
1777: # else
1.2 noro 1778: # if defined(IA64)
1779: void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
1780: # else
1781: void GC_write_fault_handler(int sig, s_c sc)
1782: # endif
1.1 noro 1783: # endif
1784: # define SIG_OK (sig == SIGSEGV)
1785: # define CODE_OK TRUE
1.2 noro 1786: /* Empirically c.trapno == 14, on IA32, but is that useful? */
1787: /* Should probably consider alignment issues on other */
1788: /* architectures. */
1.1 noro 1789: # endif
1790: # if defined(SUNOS5SIGS)
1.2 noro 1791: # ifdef __STDC__
1792: void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
1793: # else
1794: void GC_write_fault_handler(sig, scp, context)
1795: int sig;
1796: struct SIGINFO *scp;
1797: void * context;
1798: # endif
1799: # ifdef HPUX
1800: # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1801: # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
1802: || (scp -> si_code == BUS_ADRERR) \
1803: || (scp -> si_code == BUS_UNKNOWN) \
1804: || (scp -> si_code == SEGV_UNKNOWN) \
1805: || (scp -> si_code == BUS_OBJERR)
1806: # else
1807: # define SIG_OK (sig == SIGSEGV)
1808: # define CODE_OK (scp -> si_code == SEGV_ACCERR)
1809: # endif
1.1 noro 1810: # endif
1811: # if defined(MSWIN32)
1812: LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
1813: # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
1814: EXCEPTION_ACCESS_VIOLATION)
1815: # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
1816: /* Write fault */
1817: # endif
1818: {
1819: register unsigned i;
1820: # ifdef IRIX5
1821: char * addr = (char *) (size_t) (scp -> sc_badvaddr);
1822: # endif
1823: # if defined(OSF1) && defined(ALPHA)
1824: char * addr = (char *) (scp -> sc_traparg_a0);
1825: # endif
1826: # ifdef SUNOS5SIGS
1827: char * addr = (char *) (scp -> si_addr);
1828: # endif
1829: # ifdef LINUX
1830: # ifdef I386
1831: char * addr = (char *) (sc.cr2);
1832: # else
1833: # if defined(M68K)
1834: char * addr = NULL;
1835:
1836: struct sigcontext *scp = (struct sigcontext *)(&sc);
1837:
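                /* The upper 4 bits of the m68k format/vector word give the    */
                /* exception stack frame format; the frame data following the  */
                /* sigcontext contains the fault address at a format-dependent */
                /* offset, which is decoded below.                             */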
1838: int format = (scp->sc_formatvec >> 12) & 0xf;
1839: unsigned long *framedata = (unsigned long *)(scp + 1);
1840: unsigned long ea;
1841:
1842: if (format == 0xa || format == 0xb) {
1843: /* 68020/030 */
1844: ea = framedata[2];
1845: } else if (format == 7) {
1846: /* 68040 */
1847: ea = framedata[3];
1848: } else if (format == 4) {
1849: /* 68060 */
1850: ea = framedata[0];
1851: if (framedata[1] & 0x08000000) {
1852: /* correct addr on misaligned access */
1853: ea = (ea+4095)&(~4095);
1854: }
1855: }
1856: addr = (char *)ea;
1857: # else
1858: # ifdef ALPHA
1859: char * addr = get_fault_addr(sc);
1860: # else
1.2 noro 1861: # ifdef IA64
1862: char * addr = si -> si_addr;
1.3 ! noro 1863: /* I believe this is claimed to work on all platforms for */
! 1864: /* Linux 2.3.47 and later. Hopefully we don't have to */
! 1865: /* worry about earlier kernels on IA64. */
1.2 noro 1866: # else
1867: # if defined(POWERPC)
1868: char * addr = (char *) (sc.regs->dar);
1869: # else
1.1 noro 1870: --> architecture not supported
1.2 noro 1871: # endif
1872: # endif
1.1 noro 1873: # endif
1874: # endif
1875: # endif
1876: # endif
1877: # if defined(MSWIN32)
1878: char * addr = (char *) (exc_info -> ExceptionRecord
1879: -> ExceptionInformation[1]);
1880: # define sig SIGSEGV
1881: # endif
1882:
1883: if (SIG_OK && CODE_OK) {
1884: register struct hblk * h =
1885: (struct hblk *)((word)addr & ~(GC_page_size-1));
1886: GC_bool in_allocd_block;
1887:
1888: # ifdef SUNOS5SIGS
1889: /* Address is only within the correct physical page. */
1890: in_allocd_block = FALSE;
1891: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1892: if (HDR(h+i) != 0) {
1893: in_allocd_block = TRUE;
1894: }
1895: }
1896: # else
1897: in_allocd_block = (HDR(addr) != 0);
1898: # endif
1899: if (!in_allocd_block) {
1900: /* Heap blocks now begin and end on page boundaries */
1901: SIG_PF old_handler;
1902:
1903: if (sig == SIGSEGV) {
1904: old_handler = GC_old_segv_handler;
1905: } else {
1906: old_handler = GC_old_bus_handler;
1907: }
1908: if (old_handler == SIG_DFL) {
1909: # ifndef MSWIN32
1910: GC_err_printf1("Segfault at 0x%lx\n", addr);
1911: ABORT("Unexpected bus error or segmentation fault");
1912: # else
1913: return(EXCEPTION_CONTINUE_SEARCH);
1914: # endif
1915: } else {
1916: # if defined (SUNOS4) || defined(FREEBSD)
1917: (*old_handler) (sig, code, scp, addr);
1918: return;
1919: # endif
1920: # if defined (SUNOS5SIGS)
1921: (*(REAL_SIG_PF)old_handler) (sig, scp, context);
1922: return;
1923: # endif
1924: # if defined (LINUX)
1.2 noro 1925: # if defined(ALPHA) || defined(M68K)
1.1 noro 1926: (*(REAL_SIG_PF)old_handler) (sig, code, sc);
1927: # else
1.2 noro 1928: # if defined(IA64)
1929: (*(REAL_SIG_PF)old_handler) (sig, si, scp);
1930: # else
1.1 noro 1931: (*(REAL_SIG_PF)old_handler) (sig, sc);
1.2 noro 1932: # endif
1.1 noro 1933: # endif
1934: return;
1935: # endif
1936: # if defined (IRIX5) || defined(OSF1)
1937: (*(REAL_SIG_PF)old_handler) (sig, code, scp);
1938: return;
1939: # endif
1940: # ifdef MSWIN32
1941: return((*old_handler)(exc_info));
1942: # endif
1943: }
1944: }
1945: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1946: register int index = PHT_HASH(h+i);
1947:
1948: set_pht_entry_from_index(GC_dirty_pages, index);
1949: }
1950: UNPROTECT(h, GC_page_size);
1951: # if defined(OSF1) || defined(LINUX)
1952: /* These reset the signal handler each time by default. */
1953: signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
1954: # endif
1955: /* The write may not take place before dirty bits are read. */
1956: /* But then we'll fault again ... */
1957: # ifdef MSWIN32
1958: return(EXCEPTION_CONTINUE_EXECUTION);
1959: # else
1960: return;
1961: # endif
1962: }
1963: #ifdef MSWIN32
1964: return EXCEPTION_CONTINUE_SEARCH;
1965: #else
1966: GC_err_printf1("Segfault at 0x%lx\n", addr);
1967: ABORT("Unexpected bus error or segmentation fault");
1968: #endif
1969: }
1970:
1971: /*
1972: * We hold the allocation lock. We expect block h to be written
1973: * shortly.
1974: */
1975: void GC_write_hint(h)
1976: struct hblk *h;
1977: {
1978: register struct hblk * h_trunc;
1979: register unsigned i;
1980: register GC_bool found_clean;
1981:
1982: if (!GC_dirty_maintained) return;
1983: h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
1984: found_clean = FALSE;
1985: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1986: register int index = PHT_HASH(h_trunc+i);
1987:
1988: if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
1989: found_clean = TRUE;
1990: set_pht_entry_from_index(GC_dirty_pages, index);
1991: }
1992: }
1993: if (found_clean) {
1994: UNPROTECT(h_trunc, GC_page_size);
1995: }
1996: }
1997:
1998: void GC_dirty_init()
1999: {
2000: #if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
2001: struct sigaction act, oldact;
2002: # ifdef IRIX5
2003: act.sa_flags = SA_RESTART;
2004: act.sa_handler = GC_write_fault_handler;
2005: # else
2006: act.sa_flags = SA_RESTART | SA_SIGINFO;
2007: act.sa_sigaction = GC_write_fault_handler;
2008: # endif
2009: (void)sigemptyset(&act.sa_mask);
2010: #endif
2011: # ifdef PRINTSTATS
              2012: 	GC_printf0("Initializing mprotect virtual dirty bit implementation\n");
2013: # endif
2014: GC_dirty_maintained = TRUE;
2015: if (GC_page_size % HBLKSIZE != 0) {
2016: GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2017: ABORT("Page size not multiple of HBLKSIZE");
2018: }
2019: # if defined(SUNOS4) || defined(FREEBSD)
2020: GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2021: if (GC_old_bus_handler == SIG_IGN) {
2022: GC_err_printf0("Previously ignored bus error!?");
2023: GC_old_bus_handler = SIG_DFL;
2024: }
2025: if (GC_old_bus_handler != SIG_DFL) {
2026: # ifdef PRINTSTATS
2027: GC_err_printf0("Replaced other SIGBUS handler\n");
2028: # endif
2029: }
2030: # endif
2031: # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
2032: GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2033: if (GC_old_segv_handler == SIG_IGN) {
2034: GC_err_printf0("Previously ignored segmentation violation!?");
2035: GC_old_segv_handler = SIG_DFL;
2036: }
2037: if (GC_old_segv_handler != SIG_DFL) {
2038: # ifdef PRINTSTATS
2039: GC_err_printf0("Replaced other SIGSEGV handler\n");
2040: # endif
2041: }
2042: # endif
2043: # if defined(SUNOS5SIGS) || defined(IRIX5)
1.2 noro 2044: # if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
1.1 noro 2045: sigaction(SIGSEGV, 0, &oldact);
2046: sigaction(SIGSEGV, &act, 0);
2047: # else
2048: sigaction(SIGSEGV, &act, &oldact);
2049: # endif
2050: # if defined(_sigargs)
2051: /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2052: /* sa_sigaction. */
2053: GC_old_segv_handler = oldact.sa_handler;
2054: # else /* Irix 6.x or SUNOS5SIGS */
2055: if (oldact.sa_flags & SA_SIGINFO) {
2056: GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2057: } else {
2058: GC_old_segv_handler = oldact.sa_handler;
2059: }
2060: # endif
2061: if (GC_old_segv_handler == SIG_IGN) {
2062: GC_err_printf0("Previously ignored segmentation violation!?");
2063: GC_old_segv_handler = SIG_DFL;
2064: }
2065: if (GC_old_segv_handler != SIG_DFL) {
2066: # ifdef PRINTSTATS
2067: GC_err_printf0("Replaced other SIGSEGV handler\n");
2068: # endif
2069: }
1.2 noro 2070: # ifdef HPUX
2071: sigaction(SIGBUS, &act, &oldact);
2072: GC_old_bus_handler = oldact.sa_handler;
              2073: 	  if (GC_old_bus_handler != SIG_DFL) {
2074: # ifdef PRINTSTATS
2075: GC_err_printf0("Replaced other SIGBUS handler\n");
2076: # endif
2077: }
2078: # endif
1.1 noro 2079: # endif
2080: # if defined(MSWIN32)
2081: GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2082: if (GC_old_segv_handler != NULL) {
2083: # ifdef PRINTSTATS
2084: GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2085: # endif
2086: } else {
2087: GC_old_segv_handler = SIG_DFL;
2088: }
2089: # endif
2090: }
2091:
2092:
2093:
2094: void GC_protect_heap()
2095: {
2096: ptr_t start;
2097: word len;
2098: unsigned i;
2099:
2100: for (i = 0; i < GC_n_heap_sects; i++) {
2101: start = GC_heap_sects[i].hs_start;
2102: len = GC_heap_sects[i].hs_bytes;
2103: PROTECT(start, len);
2104: }
2105: }
2106:
              2107: /* We assume that either the world is stopped or it's OK to lose dirty	*/
              2108: /* bits while this is happening (as in GC_enable_incremental).		*/
2109: void GC_read_dirty()
2110: {
2111: BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2112: (sizeof GC_dirty_pages));
2113: BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2114: GC_protect_heap();
2115: }
2116:
2117: GC_bool GC_page_was_dirty(h)
2118: struct hblk * h;
2119: {
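                    /* Blocks with no header are not pages we protect, so we have no    */
                    /* dirty information for them; conservatively report them as dirty. */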
2120: register word index = PHT_HASH(h);
2121:
2122: return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2123: }
2124:
2125: /*
2126: * Acquiring the allocation lock here is dangerous, since this
2127: * can be called from within GC_call_with_alloc_lock, and the cord
2128: * package does so. On systems that allow nested lock acquisition, this
2129: * happens to work.
2130: * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2131: */
2132:
2133: void GC_begin_syscall()
2134: {
2135: if (!I_HOLD_LOCK()) LOCK();
2136: }
2137:
2138: void GC_end_syscall()
2139: {
2140: if (!I_HOLD_LOCK()) UNLOCK();
2141: }
2142:
2143: void GC_unprotect_range(addr, len)
2144: ptr_t addr;
2145: word len;
2146: {
2147: struct hblk * start_block;
2148: struct hblk * end_block;
2149: register struct hblk *h;
2150: ptr_t obj_start;
2151:
2152: if (!GC_incremental) return;
2153: obj_start = GC_base(addr);
2154: if (obj_start == 0) return;
2155: if (GC_base(addr + len - 1) != obj_start) {
2156: ABORT("GC_unprotect_range(range bigger than object)");
2157: }
2158: start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2159: end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
2160: end_block += GC_page_size/HBLKSIZE - 1;
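                    /* end_block now refers to the last HBLK of the last page touched,  */
                    /* so the loop below marks every block on every affected page.      */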
2161: for (h = start_block; h <= end_block; h++) {
2162: register word index = PHT_HASH(h);
2163:
2164: set_pht_entry_from_index(GC_dirty_pages, index);
2165: }
2166: UNPROTECT(start_block,
2167: ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2168: }
2169:
1.3 ! noro 2170: #if !defined(MSWIN32) && !defined(LINUX_THREADS)
1.1 noro 2171: /* Replacement for the UNIX read() system call.	*/
              2172:     /* Other calls that write to the heap should be		*/
              2173:     /* handled similarly; a sketch of the pattern follows	*/
              2174:     /* the wrapper below.					*/
2174: # if defined(__STDC__) && !defined(SUNOS4)
2175: # include <unistd.h>
1.3 ! noro 2176: # include <sys/uio.h>
1.1 noro 2177: ssize_t read(int fd, void *buf, size_t nbyte)
2178: # else
2179: # ifndef LINT
2180: int read(fd, buf, nbyte)
2181: # else
2182: int GC_read(fd, buf, nbyte)
2183: # endif
2184: int fd;
2185: char *buf;
2186: int nbyte;
2187: # endif
2188: {
2189: int result;
2190:
2191: GC_begin_syscall();
2192: GC_unprotect_range(buf, (word)nbyte);
1.3 ! noro 2193: # if defined(IRIX5) || defined(LINUX_THREADS)
1.1 noro 2194: /* Indirect system call may not always be easily available. */
2195: /* We could call _read, but that would interfere with the */
2196: /* libpthread interception of read. */
1.3 ! noro 2197: /* On Linux, we have to be careful with the linuxthreads */
! 2198: /* read interception. */
1.1 noro 2199: {
2200: struct iovec iov;
2201:
2202: iov.iov_base = buf;
2203: iov.iov_len = nbyte;
2204: result = readv(fd, &iov, 1);
2205: }
2206: # else
2207: result = syscall(SYS_read, fd, buf, nbyte);
2208: # endif
2209: GC_end_syscall();
2210: return(result);
2211: }
1.3 ! noro 2212: #endif /* !MSWIN32 && !LINUX_THREADS */
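                /* The read() replacement above shows the general pattern for any    */
                /* call that writes into the heap: take the allocation lock,         */
                /* unprotect the destination range, perform the real operation,      */
                /* release the lock.  The following is only a sketch of how another  */
                /* such call might be wrapped; pread() and SYS_pread are assumptions */
                /* not used or needed by this file, and the headers included for the */
                /* read() wrapper above are assumed to be available.                 */
                #if 0
                ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset)
                {
                    ssize_t result;

                    GC_begin_syscall();
                    GC_unprotect_range(buf, (word)nbyte);
                    /* Assumption: the platform provides a SYS_pread syscall number. */
                    result = syscall(SYS_pread, fd, buf, nbyte, offset);
                    GC_end_syscall();
                    return(result);
                }
                #endif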
! 2213:
! 2214: #ifdef USE_LD_WRAP
! 2215: /* We use the GNU ld call wrapping facility. */
! 2216: /* This requires that the linker be invoked with "--wrap read". */
! 2217: /* This can be done by passing -Wl,"--wrap read" to gcc. */
! 2218: /* I'm not sure that this actually wraps whatever version of read */
! 2219: /* is called by stdio. That code also mentions __read. */
! 2220: # include <unistd.h>
! 2221: ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
! 2222: {
! 2223: int result;
! 2224:
! 2225: GC_begin_syscall();
! 2226: GC_unprotect_range(buf, (word)nbyte);
! 2227: result = __real_read(fd, buf, nbyte);
! 2228: GC_end_syscall();
! 2229: return(result);
! 2230: }
! 2231:
! 2232: /* We should probably also do this for __read, or whatever stdio */
! 2233: /* actually calls. */
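                /* A sketch only: if the link line also passes "--wrap __read",      */
                /* a companion wrapper could follow the same pattern.  The           */
                /* assumption here is that __read takes the same arguments as read;  */
                /* this has not been verified for any particular libc.               */
                #if 0
                ssize_t __real___read(int fd, void *buf, size_t nbyte);

                ssize_t __wrap___read(int fd, void *buf, size_t nbyte)
                {
                    int result;

                    GC_begin_syscall();
                    GC_unprotect_range(buf, (word)nbyte);
                    result = __real___read(fd, buf, nbyte);
                    GC_end_syscall();
                    return(result);
                }
                #endif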
! 2234: #endif
1.1 noro 2235:
2236: /*ARGSUSED*/
2237: GC_bool GC_page_was_ever_dirty(h)
2238: struct hblk *h;
2239: {
2240: return(TRUE);
2241: }
2242:
2243: /* Reset the n pages starting at h to "was never dirty" status. */
2244: /*ARGSUSED*/
2245: void GC_is_fresh(h, n)
2246: struct hblk *h;
2247: word n;
2248: {
2249: }
2250:
2251: # endif /* MPROTECT_VDB */
2252:
2253: # ifdef PROC_VDB
2254:
2255: /*
2256: * See DEFAULT_VDB for interface descriptions.
2257: */
2258:
2259: /*
              2260:  * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
2261: * from which we can read page modified bits. This facility is far from
2262: * optimal (e.g. we would like to get the info for only some of the
2263: * address space), but it avoids intercepting system calls.
2264: */
2265:
2266: #include <errno.h>
2267: #include <sys/types.h>
2268: #include <sys/signal.h>
2269: #include <sys/fault.h>
2270: #include <sys/syscall.h>
2271: #include <sys/procfs.h>
2272: #include <sys/stat.h>
2273: #include <fcntl.h>
2274:
2275: #define INITIAL_BUF_SZ 4096
2276: word GC_proc_buf_size = INITIAL_BUF_SZ;
2277: char *GC_proc_buf;
2278:
2279: #ifdef SOLARIS_THREADS
2280: /* We don't have exact sp values for threads. So we count on */
2281: /* occasionally declaring stack pages to be fresh. Thus we */
2282: /* need a real implementation of GC_is_fresh. We can't clear */
2283: /* entries in GC_written_pages, since that would declare all */
2284: /* pages with the given hash address to be fresh. */
2285: # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2286: struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
2287: /* Collisions are dropped. */
2288:
2289: # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2290: # define ADD_FRESH_PAGE(h) \
2291: GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2292: # define PAGE_IS_FRESH(h) \
2293: (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
2294: #endif
2295:
2296: /* Add all pages in pht2 to pht1 */
2297: void GC_or_pages(pht1, pht2)
2298: page_hash_table pht1, pht2;
2299: {
2300: register int i;
2301:
2302: for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2303: }
2304:
2305: int GC_proc_fd;
2306:
2307: void GC_dirty_init()
2308: {
2309: int fd;
2310: char buf[30];
2311:
2312: GC_dirty_maintained = TRUE;
2313: if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
2314: register int i;
2315:
2316: for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
2317: # ifdef PRINTSTATS
2318: GC_printf1("Allocated words:%lu:all pages may have been written\n",
2319: (unsigned long)
2320: (GC_words_allocd + GC_words_allocd_before_gc));
2321: # endif
2322: }
2323: sprintf(buf, "/proc/%d", getpid());
2324: fd = open(buf, O_RDONLY);
2325: if (fd < 0) {
2326: ABORT("/proc open failed");
2327: }
2328: GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
2329: close(fd);
2330: if (GC_proc_fd < 0) {
2331: ABORT("/proc ioctl failed");
2332: }
2333: GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
2334: # ifdef SOLARIS_THREADS
2335: GC_fresh_pages = (struct hblk **)
2336: GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
2337: if (GC_fresh_pages == 0) {
2338: GC_err_printf0("No space for fresh pages\n");
2339: EXIT();
2340: }
2341: BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
2342: # endif
2343: }
2344:
2345: /* Ignore write hints. They don't help us here. */
2346: /*ARGSUSED*/
2347: void GC_write_hint(h)
2348: struct hblk *h;
2349: {
2350: }
2351:
2352: #ifdef SOLARIS_THREADS
2353: # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2354: #else
2355: # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
2356: #endif
2357:
2358: void GC_read_dirty()
2359: {
2360: unsigned long ps, np;
2361: int nmaps;
2362: ptr_t vaddr;
2363: struct prasmap * map;
2364: char * bufp;
2365: ptr_t current_addr, limit;
2366: int i;
2367: int dummy;
2368:
2369: BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
2370:
2371: bufp = GC_proc_buf;
2372: if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2373: # ifdef PRINTSTATS
2374: GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
2375: GC_proc_buf_size);
2376: # endif
2377: {
2378: /* Retry with larger buffer. */
2379: word new_size = 2 * GC_proc_buf_size;
2380: char * new_buf = GC_scratch_alloc(new_size);
2381:
2382: if (new_buf != 0) {
2383: GC_proc_buf = bufp = new_buf;
2384: GC_proc_buf_size = new_size;
2385: }
2386: if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2387: WARN("Insufficient space for /proc read\n", 0);
2388: /* Punt: */
2389: memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
2390: memset(GC_written_pages, 0xff, sizeof(page_hash_table));
2391: # ifdef SOLARIS_THREADS
2392: BZERO(GC_fresh_pages,
2393: MAX_FRESH_PAGES * sizeof (struct hblk *));
2394: # endif
2395: return;
2396: }
2397: }
2398: }
2399: /* Copy dirty bits into GC_grungy_pages */
2400: nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
2401: /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
2402: nmaps, PG_REFERENCED, PG_MODIFIED); */
2403: bufp = bufp + sizeof(struct prpageheader);
2404: for (i = 0; i < nmaps; i++) {
2405: map = (struct prasmap *)bufp;
2406: vaddr = (ptr_t)(map -> pr_vaddr);
2407: ps = map -> pr_pagesize;
2408: np = map -> pr_npage;
2409: /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
2410: limit = vaddr + ps * np;
2411: bufp += sizeof (struct prasmap);
2412: for (current_addr = vaddr;
2413: current_addr < limit; current_addr += ps){
2414: if ((*bufp++) & PG_MODIFIED) {
2415: register struct hblk * h = (struct hblk *) current_addr;
2416:
2417: while ((ptr_t)h < current_addr + ps) {
2418: register word index = PHT_HASH(h);
2419:
2420: set_pht_entry_from_index(GC_grungy_pages, index);
2421: # ifdef SOLARIS_THREADS
2422: {
2423: register int slot = FRESH_PAGE_SLOT(h);
2424:
2425: if (GC_fresh_pages[slot] == h) {
2426: GC_fresh_pages[slot] = 0;
2427: }
2428: }
2429: # endif
2430: h++;
2431: }
2432: }
2433: }
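                    /* Round bufp up to the next long-aligned address before  */
                    /* reading the next prasmap header.                       */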
2434: bufp += sizeof(long) - 1;
2435: bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
2436: }
2437: /* Update GC_written_pages. */
2438: GC_or_pages(GC_written_pages, GC_grungy_pages);
2439: # ifdef SOLARIS_THREADS
2440: /* Make sure that old stacks are considered completely clean */
2441: /* unless written again. */
2442: GC_old_stacks_are_fresh();
2443: # endif
2444: }
2445:
2446: #undef READ
2447:
2448: GC_bool GC_page_was_dirty(h)
2449: struct hblk *h;
2450: {
2451: register word index = PHT_HASH(h);
2452: register GC_bool result;
2453:
2454: result = get_pht_entry_from_index(GC_grungy_pages, index);
2455: # ifdef SOLARIS_THREADS
2456: if (result && PAGE_IS_FRESH(h)) result = FALSE;
2457: /* This happens only if page was declared fresh since */
2458: /* the read_dirty call, e.g. because it's in an unused */
2459: /* thread stack. It's OK to treat it as clean, in */
2460: /* that case. And it's consistent with */
2461: /* GC_page_was_ever_dirty. */
2462: # endif
2463: return(result);
2464: }
2465:
2466: GC_bool GC_page_was_ever_dirty(h)
2467: struct hblk *h;
2468: {
2469: register word index = PHT_HASH(h);
2470: register GC_bool result;
2471:
2472: result = get_pht_entry_from_index(GC_written_pages, index);
2473: # ifdef SOLARIS_THREADS
2474: if (result && PAGE_IS_FRESH(h)) result = FALSE;
2475: # endif
2476: return(result);
2477: }
2478:
2479: /* Caller holds allocation lock. */
2480: void GC_is_fresh(h, n)
2481: struct hblk *h;
2482: word n;
2483: {
2484:
2485: register word index;
2486:
2487: # ifdef SOLARIS_THREADS
2488: register word i;
2489:
2490: if (GC_fresh_pages != 0) {
2491: for (i = 0; i < n; i++) {
2492: ADD_FRESH_PAGE(h + i);
2493: }
2494: }
2495: # endif
2496: }
2497:
2498: # endif /* PROC_VDB */
2499:
2500:
2501: # ifdef PCR_VDB
2502:
2503: # include "vd/PCR_VD.h"
2504:
2505: # define NPAGES (32*1024) /* 128 MB */
2506:
2507: PCR_VD_DB GC_grungy_bits[NPAGES];
2508:
2509: ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
2510: /* HBLKSIZE aligned. */
2511:
2512: void GC_dirty_init()
2513: {
2514: GC_dirty_maintained = TRUE;
2515: /* For the time being, we assume the heap generally grows up */
2516: GC_vd_base = GC_heap_sects[0].hs_start;
2517: if (GC_vd_base == 0) {
2518: ABORT("Bad initial heap segment");
2519: }
2520: if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
2521: != PCR_ERes_okay) {
2522: ABORT("dirty bit initialization failed");
2523: }
2524: }
2525:
2526: void GC_read_dirty()
2527: {
2528: /* lazily enable dirty bits on newly added heap sects */
2529: {
2530: static int onhs = 0;
2531: int nhs = GC_n_heap_sects;
2532: for( ; onhs < nhs; onhs++ ) {
2533: PCR_VD_WriteProtectEnable(
2534: GC_heap_sects[onhs].hs_start,
2535: GC_heap_sects[onhs].hs_bytes );
2536: }
2537: }
2538:
2539:
2540: if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
2541: != PCR_ERes_okay) {
2542: ABORT("dirty bit read failed");
2543: }
2544: }
2545:
2546: GC_bool GC_page_was_dirty(h)
2547: struct hblk *h;
2548: {
2549: if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
2550: return(TRUE);
2551: }
2552: return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
2553: }
2554:
2555: /*ARGSUSED*/
2556: void GC_write_hint(h)
2557: struct hblk *h;
2558: {
2559: PCR_VD_WriteProtectDisable(h, HBLKSIZE);
2560: PCR_VD_WriteProtectEnable(h, HBLKSIZE);
2561: }
2562:
2563: # endif /* PCR_VDB */
2564:
2565: /*
2566: * Call stack save code for debugging.
2567: * Should probably be in mach_dep.c, but that requires reorganization.
2568: */
2569: #if defined(SPARC) && !defined(LINUX)
2570: # if defined(SUNOS4)
2571: # include <machine/frame.h>
2572: # else
2573: # if defined (DRSNX)
2574: # include <sys/sparc/frame.h>
2575: # else
2576: # if defined(OPENBSD)
2577: # include <frame.h>
2578: # else
2579: # include <sys/frame.h>
2580: # endif
2581: # endif
2582: # endif
2583: # if NARGS > 6
              2584:   --> We only know how to get the first 6 arguments
2585: # endif
2586:
2587: #ifdef SAVE_CALL_CHAIN
2588: /* Fill in the pc and argument information for up to NFRAMES of my */
              2589: /* callers.  Ignore my frame and my caller's frame.	*/
2590:
2591: #ifdef OPENBSD
2592: # define FR_SAVFP fr_fp
2593: # define FR_SAVPC fr_pc
2594: #else
2595: # define FR_SAVFP fr_savfp
2596: # define FR_SAVPC fr_savpc
2597: #endif
2598:
2599: void GC_save_callers (info)
2600: struct callinfo info[NFRAMES];
2601: {
2602: struct frame *frame;
2603: struct frame *fp;
2604: int nframes = 0;
2605: word GC_save_regs_in_stack();
2606:
2607: frame = (struct frame *) GC_save_regs_in_stack ();
2608:
2609: for (fp = frame -> FR_SAVFP; fp != 0 && nframes < NFRAMES;
2610: fp = fp -> FR_SAVFP, nframes++) {
2611: register int i;
2612:
2613: info[nframes].ci_pc = fp->FR_SAVPC;
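                    /* The argument words are stored bitwise-complemented below,  */
                    /* presumably so that the saved values are not mistaken for   */
                    /* pointers that would keep their referents live.             */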
2614: for (i = 0; i < NARGS; i++) {
2615: info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
2616: }
2617: }
2618: if (nframes < NFRAMES) info[nframes].ci_pc = 0;
2619: }
2620:
2621: #endif /* SAVE_CALL_CHAIN */
2622: #endif /* SPARC */
2623:
2624:
2625: