Annotation of OpenXM_contrib2/asir2000/gc/os_dep.c, Revision 1.2
1.1 noro 1: int ox_usr1_sent, ox_int_received, critical_when_signal;
2: static int inside_critical_section;
3:
4: /*
1.2 ! noro 5: * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
1.1 noro 6: * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
1.2 ! noro 7: * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
! 8: * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
1.1 noro 9: *
10: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
11: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
12: *
13: * Permission is hereby granted to use or copy this program
14: * for any purpose, provided the above notices are retained on all copies.
15: * Permission to modify the code and to distribute modified code is granted,
16: * provided the above notices are retained, and a notice that the code was
17: * modified is included with the above copyright notice.
18: */
19:
20: # include "gc_priv.h"
21:
22: # if defined(LINUX) && !defined(POWERPC)
23: # include <linux/version.h>
24: # if (LINUX_VERSION_CODE <= 0x10400)
25: /* Ugly hack to get struct sigcontext_struct definition. Required */
26: /* for some early 1.3.X releases. Will hopefully go away soon. */
 26: /* In some later Linux releases, asm/sigcontext.h may have to */
28: /* be included instead. */
29: # define __KERNEL__
30: # include <asm/signal.h>
31: # undef __KERNEL__
32: # else
33: /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
34: /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
35: /* prototypes, so we have to include the top-level sigcontext.h to */
36: /* make sure the former gets defined to be the latter if appropriate. */
37: # include <features.h>
38: # if 2 <= __GLIBC__
1.2 ! noro 39: # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
1.1 noro 40: /* glibc 2.1 no longer has sigcontext.h. But signal.h */
41: /* has the right declaration for glibc 2.1. */
42: # include <sigcontext.h>
43: # endif /* 0 == __GLIBC_MINOR__ */
44: # else /* not 2 <= __GLIBC__ */
45: /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
46: /* one. Check LINUX_VERSION_CODE to see which we should reference. */
47: # include <asm/sigcontext.h>
48: # endif /* 2 <= __GLIBC__ */
49: # endif
50: # endif
51: # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
52: # include <sys/types.h>
53: # if !defined(MSWIN32) && !defined(SUNOS4)
54: # include <unistd.h>
55: # endif
56: # endif
57:
58: # include <stdio.h>
59: # include <signal.h>
60:
61: /* Blatantly OS dependent routines, except for those that are related */
1.2 ! noro 62: /* to dynamic loading. */
1.1 noro 63:
64: # if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
65: # define NEED_FIND_LIMIT
66: # endif
67:
1.2 ! noro 68: # if defined(IRIX_THREADS) || defined(HPUX_THREADS)
1.1 noro 69: # define NEED_FIND_LIMIT
70: # endif
71:
72: # if (defined(SUNOS4) & defined(DYNAMIC_LOADING)) && !defined(PCR)
73: # define NEED_FIND_LIMIT
74: # endif
75:
76: # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
77: # define NEED_FIND_LIMIT
78: # endif
79:
1.2 ! noro 80: # if defined(LINUX) && \
! 81: (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64))
1.1 noro 82: # define NEED_FIND_LIMIT
83: # endif
84:
85: #ifdef NEED_FIND_LIMIT
86: # include <setjmp.h>
87: #endif
88:
89: #ifdef FREEBSD
90: # include <machine/trap.h>
91: #endif
92:
93: #ifdef AMIGA
94: # include <proto/exec.h>
95: # include <proto/dos.h>
96: # include <dos/dosextens.h>
97: # include <workbench/startup.h>
98: #endif
99:
100: #ifdef MSWIN32
101: # define WIN32_LEAN_AND_MEAN
102: # define NOSERVICE
103: # include <windows.h>
104: #endif
105:
106: #ifdef MACOS
107: # include <Processes.h>
108: #endif
109:
110: #ifdef IRIX5
111: # include <sys/uio.h>
112: # include <malloc.h> /* for locking */
113: #endif
114: #ifdef USE_MMAP
115: # include <sys/types.h>
116: # include <sys/mman.h>
117: # include <sys/stat.h>
118: # include <fcntl.h>
119: #endif
120:
121: #ifdef SUNOS5SIGS
122: # include <sys/siginfo.h>
123: # undef setjmp
124: # undef longjmp
125: # define setjmp(env) sigsetjmp(env, 1)
126: # define longjmp(env, val) siglongjmp(env, val)
127: # define jmp_buf sigjmp_buf
128: #endif
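/* (The nonzero savemask argument to sigsetjmp above makes siglongjmp	*/
/* restore the signal mask as well, so a longjmp out of a fault	*/
/* handler leaves the handled signal usable again.)			*/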
129:
130: #ifdef DJGPP
 131: /* Apparently necessary for djgpp 2.01. May cause problems with */
132: /* other versions. */
133: typedef long unsigned int caddr_t;
134: #endif
135:
136: #ifdef PCR
137: # include "il/PCR_IL.h"
138: # include "th/PCR_ThCtl.h"
139: # include "mm/PCR_MM.h"
140: #endif
141:
142: #if !defined(NO_EXECUTE_PERMISSION)
143: # define OPT_PROT_EXEC PROT_EXEC
144: #else
145: # define OPT_PROT_EXEC 0
146: #endif
147:
1.2 ! noro 148: #if defined(LINUX) && (defined(POWERPC) || defined(SPARC) || defined(ALPHA) \
! 149: || defined(IA64))
1.1 noro 150: /* The I386 case can be handled without a search. The Alpha case */
151: /* used to be handled differently as well, but the rules changed */
152: /* for recent Linux versions. This seems to be the easiest way to */
153: /* cover all versions. */
154: ptr_t GC_data_start;
155:
156: extern char * GC_copyright[]; /* Any data symbol would do. */
157:
158: void GC_init_linux_data_start()
159: {
160: extern ptr_t GC_find_limit();
161:
162: GC_data_start = GC_find_limit((ptr_t)GC_copyright, FALSE);
163: }
164: #endif
165:
166: # ifdef OS2
167:
168: # include <stddef.h>
169:
170: # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
171:
172: struct exe_hdr {
173: unsigned short magic_number;
174: unsigned short padding[29];
175: long new_exe_offset;
176: };
177:
178: #define E_MAGIC(x) (x).magic_number
179: #define EMAGIC 0x5A4D
180: #define E_LFANEW(x) (x).new_exe_offset
181:
182: struct e32_exe {
183: unsigned char magic_number[2];
184: unsigned char byte_order;
185: unsigned char word_order;
186: unsigned long exe_format_level;
187: unsigned short cpu;
188: unsigned short os;
189: unsigned long padding1[13];
190: unsigned long object_table_offset;
191: unsigned long object_count;
192: unsigned long padding2[31];
193: };
194:
195: #define E32_MAGIC1(x) (x).magic_number[0]
196: #define E32MAGIC1 'L'
197: #define E32_MAGIC2(x) (x).magic_number[1]
198: #define E32MAGIC2 'X'
199: #define E32_BORDER(x) (x).byte_order
200: #define E32LEBO 0
201: #define E32_WORDER(x) (x).word_order
202: #define E32LEWO 0
203: #define E32_CPU(x) (x).cpu
204: #define E32CPU286 1
205: #define E32_OBJTAB(x) (x).object_table_offset
206: #define E32_OBJCNT(x) (x).object_count
207:
208: struct o32_obj {
209: unsigned long size;
210: unsigned long base;
211: unsigned long flags;
212: unsigned long pagemap;
213: unsigned long mapsize;
214: unsigned long reserved;
215: };
216:
217: #define O32_FLAGS(x) (x).flags
218: #define OBJREAD 0x0001L
219: #define OBJWRITE 0x0002L
220: #define OBJINVALID 0x0080L
221: #define O32_SIZE(x) (x).size
222: #define O32_BASE(x) (x).base
223:
224: # else /* IBM's compiler */
225:
226: /* A kludge to get around what appears to be a header file bug */
227: # ifndef WORD
228: # define WORD unsigned short
229: # endif
230: # ifndef DWORD
231: # define DWORD unsigned long
232: # endif
233:
234: # define EXE386 1
235: # include <newexe.h>
236: # include <exe386.h>
237:
238: # endif /* __IBMC__ */
239:
240: # define INCL_DOSEXCEPTIONS
241: # define INCL_DOSPROCESS
242: # define INCL_DOSERRORS
243: # define INCL_DOSMODULEMGR
244: # define INCL_DOSMEMMGR
245: # include <os2.h>
246:
247:
248: /* Disable and enable signals during nontrivial allocations */
249:
250: void GC_disable_signals(void)
251: {
252: ULONG nest;
253:
254: DosEnterMustComplete(&nest);
255: if (nest != 1) ABORT("nested GC_disable_signals");
256: }
257:
258: void GC_enable_signals(void)
259: {
260: ULONG nest;
261:
262: DosExitMustComplete(&nest);
263: if (nest != 0) ABORT("GC_enable_signals");
264: }
265:
266:
267: # else
268:
269: # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
270: && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW)
271:
272: # if defined(sigmask) && !defined(UTS4)
273: /* Use the traditional BSD interface */
274: # define SIGSET_T int
275: # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
276: # define SIG_FILL(set) (set) = 0x7fffffff
277: /* Setting the leading bit appears to provoke a bug in some */
278: /* longjmp implementations. Most systems appear not to have */
279: /* a signal 32. */
280: # define SIGSETMASK(old, new) (old) = sigsetmask(new)
281: # else
282: /* Use POSIX/SYSV interface */
283: # define SIGSET_T sigset_t
284: # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
285: # define SIG_FILL(set) sigfillset(&set)
286: # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
287: # endif
288:
289: static GC_bool mask_initialized = FALSE;
290:
291: static SIGSET_T new_mask;
292:
293: static SIGSET_T old_mask;
294:
295: static SIGSET_T dummy;
296:
297: #if defined(PRINTSTATS) && !defined(THREADS)
298: # define CHECK_SIGNALS
299: int GC_sig_disabled = 0;
300: #endif
301:
302: void GC_disable_signals()
303: {
304: if (!mask_initialized) {
305: SIG_FILL(new_mask);
306:
307: SIG_DEL(new_mask, SIGSEGV);
308: SIG_DEL(new_mask, SIGILL);
309: SIG_DEL(new_mask, SIGQUIT);
310: # ifdef SIGBUS
311: SIG_DEL(new_mask, SIGBUS);
312: # endif
313: # ifdef SIGIOT
314: SIG_DEL(new_mask, SIGIOT);
315: # endif
316: # ifdef SIGEMT
317: SIG_DEL(new_mask, SIGEMT);
318: # endif
319: # ifdef SIGTRAP
320: SIG_DEL(new_mask, SIGTRAP);
321: # endif
322: mask_initialized = TRUE;
323: }
324: # ifdef CHECK_SIGNALS
325: if (GC_sig_disabled != 0) ABORT("Nested disables");
326: GC_sig_disabled++;
327: # endif
328: SIGSETMASK(old_mask,new_mask);
329: if ( critical_when_signal )
330: inside_critical_section = 1;
331: else {
332: inside_critical_section = 0;
333: critical_when_signal = 1;
334: }
335: }
336:
337: void GC_enable_signals()
338: {
339: # ifdef CHECK_SIGNALS
340: if (GC_sig_disabled != 1) ABORT("Unmatched enable");
341: GC_sig_disabled--;
342: # endif
343: SIGSETMASK(dummy,old_mask);
344: if ( !inside_critical_section ) {
345: critical_when_signal = 0;
346: if ( ox_usr1_sent ) {
347: ox_usr1_sent = 0; ox_usr1_handler();
348: }
349: if ( ox_int_received ) {
350: ox_int_received = 0; int_handler();
351: }
352: } else
353: inside_critical_section = 0;
354: }
355:
356: # endif /* !PCR */
357:
358: # endif /*!OS/2 */
359:
360: /* Ivan Demakov: simplest way (to me) */
361: #ifdef DOS4GW
362: void GC_disable_signals() { }
363: void GC_enable_signals() { }
364: #endif
365:
366: /* Find the page size */
367: word GC_page_size;
368:
369: # ifdef MSWIN32
370: void GC_setpagesize()
371: {
372: SYSTEM_INFO sysinfo;
373:
374: GetSystemInfo(&sysinfo);
375: GC_page_size = sysinfo.dwPageSize;
376: }
377:
378: # else
379: # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
380: || defined(USE_MUNMAP)
381: void GC_setpagesize()
382: {
383: GC_page_size = GETPAGESIZE();
384: }
385: # else
386: /* It's acceptable to fake it. */
387: void GC_setpagesize()
388: {
389: GC_page_size = HBLKSIZE;
390: }
391: # endif
392: # endif
393:
394: /*
395: * Find the base of the stack.
396: * Used only in single-threaded environment.
397: * With threads, GC_mark_roots needs to know how to do this.
398: * Called with allocator lock held.
399: */
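/* On win32 this is done below by measuring the writable region that	*/
/* contains the current stack pointer (GC_get_writable_length).  The	*/
/* Unix version further down chooses among a hard-wired STACKBOTTOM,	*/
/* HEURISTIC1 (round the sp to a STACK_GRAN boundary),			*/
/* LINUX_STACKBOTTOM (parse /proc/<pid>/stat), and HEURISTIC2 (probe	*/
/* with GC_find_limit).						*/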
400: # ifdef MSWIN32
401: # define is_writable(prot) ((prot) == PAGE_READWRITE \
402: || (prot) == PAGE_WRITECOPY \
403: || (prot) == PAGE_EXECUTE_READWRITE \
404: || (prot) == PAGE_EXECUTE_WRITECOPY)
405: /* Return the number of bytes that are writable starting at p. */
406: /* The pointer p is assumed to be page aligned. */
407: /* If base is not 0, *base becomes the beginning of the */
408: /* allocation region containing p. */
409: word GC_get_writable_length(ptr_t p, ptr_t *base)
410: {
411: MEMORY_BASIC_INFORMATION buf;
412: word result;
413: word protect;
414:
415: result = VirtualQuery(p, &buf, sizeof(buf));
416: if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
417: if (base != 0) *base = (ptr_t)(buf.AllocationBase);
418: protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
419: if (!is_writable(protect)) {
420: return(0);
421: }
422: if (buf.State != MEM_COMMIT) return(0);
423: return(buf.RegionSize);
424: }
425:
426: ptr_t GC_get_stack_base()
427: {
428: int dummy;
429: ptr_t sp = (ptr_t)(&dummy);
430: ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
431: word size = GC_get_writable_length(trunc_sp, 0);
432:
433: return(trunc_sp + size);
434: }
435:
436:
437: # else
438:
439: # ifdef OS2
440:
441: ptr_t GC_get_stack_base()
442: {
443: PTIB ptib;
444: PPIB ppib;
445:
446: if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
447: GC_err_printf0("DosGetInfoBlocks failed\n");
448: ABORT("DosGetInfoBlocks failed\n");
449: }
450: return((ptr_t)(ptib -> tib_pstacklimit));
451: }
452:
453: # else
454:
455: # ifdef AMIGA
456:
457: ptr_t GC_get_stack_base()
458: {
459: struct Process *proc = (struct Process*)SysBase->ThisTask;
460:
461: /* Reference: Amiga Guru Book Pages: 42,567,574 */
462: if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS
463: && proc->pr_CLI != NULL) {
464: /* first ULONG is StackSize */
465: /*longPtr = proc->pr_ReturnAddr;
466: size = longPtr[0];*/
467:
468: return (char *)proc->pr_ReturnAddr + sizeof(ULONG);
469: } else {
470: return (char *)proc->pr_Task.tc_SPUpper;
471: }
472: }
473:
474: #if 0 /* old version */
475: ptr_t GC_get_stack_base()
476: {
477: extern struct WBStartup *_WBenchMsg;
478: extern long __base;
479: extern long __stack;
480: struct Task *task;
481: struct Process *proc;
482: struct CommandLineInterface *cli;
483: long size;
484:
485: if ((task = FindTask(0)) == 0) {
486: GC_err_puts("Cannot find own task structure\n");
487: ABORT("task missing");
488: }
489: proc = (struct Process *)task;
490: cli = BADDR(proc->pr_CLI);
491:
492: if (_WBenchMsg != 0 || cli == 0) {
493: size = (char *)task->tc_SPUpper - (char *)task->tc_SPLower;
494: } else {
495: size = cli->cli_DefaultStack * 4;
496: }
497: return (ptr_t)(__base + GC_max(size, __stack));
498: }
499: #endif /* 0 */
500:
501: # else /* !AMIGA, !OS2, ... */
502:
503: # ifdef NEED_FIND_LIMIT
504: /* Some tools to implement HEURISTIC2 */
505: # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
506: /* static */ jmp_buf GC_jmp_buf;
507:
508: /*ARGSUSED*/
509: void GC_fault_handler(sig)
510: int sig;
511: {
512: longjmp(GC_jmp_buf, 1);
513: }
514:
515: # ifdef __STDC__
516: typedef void (*handler)(int);
517: # else
518: typedef void (*handler)();
519: # endif
520:
521: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
522: static struct sigaction old_segv_act;
1.2 ! noro 523: # if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
1.1 noro 524: static struct sigaction old_bus_act;
525: # endif
526: # else
527: static handler old_segv_handler, old_bus_handler;
528: # endif
529:
530: void GC_setup_temporary_fault_handler()
531: {
532: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
533: struct sigaction act;
534:
535: act.sa_handler = GC_fault_handler;
536: act.sa_flags = SA_RESTART | SA_NODEFER;
537: /* The presence of SA_NODEFER represents yet another gross */
538: /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
539: /* interact correctly with -lthread. We hide the confusion */
540: /* by making sure that signal handling doesn't affect the */
541: /* signal mask. */
542:
543: (void) sigemptyset(&act.sa_mask);
544: # ifdef IRIX_THREADS
 545: 	/* Older versions have a bug related to retrieving and	*/
 546: 	/* setting a handler at the same time.			*/
547: (void) sigaction(SIGSEGV, 0, &old_segv_act);
548: (void) sigaction(SIGSEGV, &act, 0);
549: # else
550: (void) sigaction(SIGSEGV, &act, &old_segv_act);
1.2 ! noro 551: # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
! 552: || defined(HPUX)
! 553: /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
! 554: /* Pthreads doesn't exist under Irix 5.x, so we */
! 555: /* don't have to worry in the threads case. */
1.1 noro 556: (void) sigaction(SIGBUS, &act, &old_bus_act);
557: # endif
558: # endif /* IRIX_THREADS */
559: # else
560: old_segv_handler = signal(SIGSEGV, GC_fault_handler);
561: # ifdef SIGBUS
562: old_bus_handler = signal(SIGBUS, GC_fault_handler);
563: # endif
564: # endif
565: }
566:
567: void GC_reset_fault_handler()
568: {
569: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
570: (void) sigaction(SIGSEGV, &old_segv_act, 0);
1.2 ! noro 571: # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
! 572: || defined(HPUX)
1.1 noro 573: (void) sigaction(SIGBUS, &old_bus_act, 0);
574: # endif
575: # else
576: (void) signal(SIGSEGV, old_segv_handler);
577: # ifdef SIGBUS
578: (void) signal(SIGBUS, old_bus_handler);
579: # endif
580: # endif
581: }
582:
 583: /* Return the first nonaddressable location > p (up) or 	*/
 584: /* the smallest location q s.t. [q,p] is addressable (!up).	*/
585: ptr_t GC_find_limit(p, up)
586: ptr_t p;
587: GC_bool up;
588: {
589: static VOLATILE ptr_t result;
590: /* Needs to be static, since otherwise it may not be */
591: /* preserved across the longjmp. Can safely be */
592: /* static since it's only called once, with the */
593: /* allocation lock held. */
594:
595:
596: GC_setup_temporary_fault_handler();
597: if (setjmp(GC_jmp_buf) == 0) {
598: result = (ptr_t)(((word)(p))
599: & ~(MIN_PAGE_SIZE-1));
600: for (;;) {
601: if (up) {
602: result += MIN_PAGE_SIZE;
603: } else {
604: result -= MIN_PAGE_SIZE;
605: }
606: GC_noop1((word)(*result));
607: }
608: }
609: GC_reset_fault_handler();
610: if (!up) {
611: result += MIN_PAGE_SIZE;
612: }
613: return(result);
614: }
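/* Typical uses in this file: GC_find_limit(sp, TRUE) probes upward	*/
/* from the current stack pointer to find the stack bottom		*/
/* (HEURISTIC2 with STACK_GROWS_DOWN), and GC_find_limit(addr, FALSE)	*/
/* probes downward from a data symbol to find the start of the data	*/
/* segment (see GC_init_linux_data_start above).			*/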
615: # endif
616:
1.2 ! noro 617: #ifdef LINUX_STACKBOTTOM
! 618:
! 619: # define STAT_SKIP 27 /* Number of fields preceding startstack */
! 620: /* field in /proc/<pid>/stat */
! 621:
! 622: ptr_t GC_linux_stack_base(void)
! 623: {
! 624: char buf[50];
! 625: FILE *f;
! 626: char c;
! 627: word result = 0;
! 628: int i;
! 629:
! 630: sprintf(buf, "/proc/%d/stat", getpid());
! 631: f = fopen(buf, "r");
! 632: if (NULL == f) ABORT("Couldn't open /proc/<pid>/stat");
! 633: c = getc(f);
! 634: /* Skip the required number of fields. This number is hopefully */
! 635: /* constant across all Linux implementations. */
! 636: for (i = 0; i < STAT_SKIP; ++i) {
! 637: while (isspace(c)) c = getc(f);
! 638: while (!isspace(c)) c = getc(f);
! 639: }
! 640: while (isspace(c)) c = getc(f);
! 641: while (isdigit(c)) {
! 642: result *= 10;
! 643: result += c - '0';
! 644: c = getc(f);
! 645: }
! 646: if (result < 0x10000000) ABORT("Absurd stack bottom value");
! 647: return (ptr_t)result;
! 648: }
! 649:
! 650: #endif /* LINUX_STACKBOTTOM */
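/* The line read above looks like (heavily abbreviated):		*/
/*   pid (comm) state ppid ... startcode endcode startstack ...	*/
/* where startstack, the 28th field (hence STAT_SKIP == 27), is the	*/
/* address at which the main stack starts, i.e. the stack bottom for	*/
/* a downward-growing stack.						*/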
1.1 noro 651:
652: ptr_t GC_get_stack_base()
653: {
654: word dummy;
655: ptr_t result;
656:
657: # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
658:
659: # ifdef STACKBOTTOM
660: return(STACKBOTTOM);
661: # else
662: # ifdef HEURISTIC1
663: # ifdef STACK_GROWS_DOWN
664: result = (ptr_t)((((word)(&dummy))
665: + STACKBOTTOM_ALIGNMENT_M1)
666: & ~STACKBOTTOM_ALIGNMENT_M1);
667: # else
668: result = (ptr_t)(((word)(&dummy))
669: & ~STACKBOTTOM_ALIGNMENT_M1);
670: # endif
671: # endif /* HEURISTIC1 */
1.2 ! noro 672: # ifdef LINUX_STACKBOTTOM
! 673: result = GC_linux_stack_base();
! 674: # endif
1.1 noro 675: # ifdef HEURISTIC2
676: # ifdef STACK_GROWS_DOWN
677: result = GC_find_limit((ptr_t)(&dummy), TRUE);
678: # ifdef HEURISTIC2_LIMIT
679: if (result > HEURISTIC2_LIMIT
680: && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
681: result = HEURISTIC2_LIMIT;
682: }
683: # endif
684: # else
685: result = GC_find_limit((ptr_t)(&dummy), FALSE);
686: # ifdef HEURISTIC2_LIMIT
687: if (result < HEURISTIC2_LIMIT
688: && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
689: result = HEURISTIC2_LIMIT;
690: }
691: # endif
692: # endif
693:
694: # endif /* HEURISTIC2 */
695: # ifdef STACK_GROWS_DOWN
696: if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
697: # endif
698: return(result);
699: # endif /* STACKBOTTOM */
700: }
701:
702: # endif /* ! AMIGA */
703: # endif /* ! OS2 */
704: # endif /* ! MSWIN32 */
705:
706: /*
707: * Register static data segment(s) as roots.
 708:  * If more data segments are added later, then they need to be registered
 709:  * at that point (as we do with SunOS dynamic loading),
710: * or GC_mark_roots needs to check for them (as we do with PCR).
711: * Called with allocator lock held.
712: */
713:
714: # ifdef OS2
715:
716: void GC_register_data_segments()
717: {
718: PTIB ptib;
719: PPIB ppib;
720: HMODULE module_handle;
721: # define PBUFSIZ 512
722: UCHAR path[PBUFSIZ];
723: FILE * myexefile;
724: struct exe_hdr hdrdos; /* MSDOS header. */
725: struct e32_exe hdr386; /* Real header for my executable */
 726: struct o32_obj seg; /* Current segment */
727: int nsegs;
728:
729:
730: if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
731: GC_err_printf0("DosGetInfoBlocks failed\n");
732: ABORT("DosGetInfoBlocks failed\n");
733: }
734: module_handle = ppib -> pib_hmte;
735: if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
736: GC_err_printf0("DosQueryModuleName failed\n");
737: ABORT("DosGetInfoBlocks failed\n");
738: }
739: myexefile = fopen(path, "rb");
740: if (myexefile == 0) {
741: GC_err_puts("Couldn't open executable ");
742: GC_err_puts(path); GC_err_puts("\n");
743: ABORT("Failed to open executable\n");
744: }
745: if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
746: GC_err_puts("Couldn't read MSDOS header from ");
747: GC_err_puts(path); GC_err_puts("\n");
748: ABORT("Couldn't read MSDOS header");
749: }
750: if (E_MAGIC(hdrdos) != EMAGIC) {
751: GC_err_puts("Executable has wrong DOS magic number: ");
752: GC_err_puts(path); GC_err_puts("\n");
753: ABORT("Bad DOS magic number");
754: }
755: if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
756: GC_err_puts("Seek to new header failed in ");
757: GC_err_puts(path); GC_err_puts("\n");
758: ABORT("Bad DOS magic number");
759: }
760: if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
761: GC_err_puts("Couldn't read MSDOS header from ");
762: GC_err_puts(path); GC_err_puts("\n");
763: ABORT("Couldn't read OS/2 header");
764: }
765: if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
766: GC_err_puts("Executable has wrong OS/2 magic number:");
767: GC_err_puts(path); GC_err_puts("\n");
768: ABORT("Bad OS/2 magic number");
769: }
770: if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
771: GC_err_puts("Executable %s has wrong byte order: ");
772: GC_err_puts(path); GC_err_puts("\n");
773: ABORT("Bad byte order");
774: }
775: if ( E32_CPU(hdr386) == E32CPU286) {
776: GC_err_puts("GC can't handle 80286 executables: ");
777: GC_err_puts(path); GC_err_puts("\n");
778: EXIT();
779: }
780: if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
781: SEEK_SET) != 0) {
782: GC_err_puts("Seek to object table failed: ");
783: GC_err_puts(path); GC_err_puts("\n");
784: ABORT("Seek to object table failed");
785: }
786: for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
787: int flags;
788: if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
789: GC_err_puts("Couldn't read obj table entry from ");
790: GC_err_puts(path); GC_err_puts("\n");
791: ABORT("Couldn't read obj table entry");
792: }
793: flags = O32_FLAGS(seg);
794: if (!(flags & OBJWRITE)) continue;
795: if (!(flags & OBJREAD)) continue;
796: if (flags & OBJINVALID) {
797: GC_err_printf0("Object with invalid pages?\n");
798: continue;
799: }
800: GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
801: }
802: }
803:
804: # else
805:
806: # ifdef MSWIN32
807: /* Unfortunately, we have to handle win32s very differently from NT, */
 808: /* since VirtualQuery has very different semantics. In particular, */
809: /* under win32s a VirtualQuery call on an unmapped page returns an */
 810: /* invalid result. Under NT, GC_register_data_segments is a noop and */
811: /* all real work is done by GC_register_dynamic_libraries. Under */
812: /* win32s, we cannot find the data segments associated with dll's. */
 813: /* We register the main data segment here. */
814: GC_bool GC_win32s = FALSE; /* We're running under win32s. */
815:
816: GC_bool GC_is_win32s()
817: {
818: DWORD v = GetVersion();
819:
820: /* Check that this is not NT, and Windows major version <= 3 */
821: return ((v & 0x80000000) && (v & 0xff) <= 3);
822: }
823:
824: void GC_init_win32()
825: {
826: GC_win32s = GC_is_win32s();
827: }
828:
829: /* Return the smallest address a such that VirtualQuery */
830: /* returns correct results for all addresses between a and start. */
831: /* Assumes VirtualQuery returns correct information for start. */
832: ptr_t GC_least_described_address(ptr_t start)
833: {
834: MEMORY_BASIC_INFORMATION buf;
835: SYSTEM_INFO sysinfo;
836: DWORD result;
837: LPVOID limit;
838: ptr_t p;
839: LPVOID q;
840:
841: GetSystemInfo(&sysinfo);
842: limit = sysinfo.lpMinimumApplicationAddress;
843: p = (ptr_t)((word)start & ~(GC_page_size - 1));
844: for (;;) {
845: q = (LPVOID)(p - GC_page_size);
846: if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
847: result = VirtualQuery(q, &buf, sizeof(buf));
848: if (result != sizeof(buf) || buf.AllocationBase == 0) break;
849: p = (ptr_t)(buf.AllocationBase);
850: }
851: return(p);
852: }
853:
854: /* Is p the start of either the malloc heap, or of one of our */
855: /* heap sections? */
856: GC_bool GC_is_heap_base (ptr_t p)
857: {
858:
859: register unsigned i;
860:
861: # ifndef REDIRECT_MALLOC
862: static ptr_t malloc_heap_pointer = 0;
863:
864: if (0 == malloc_heap_pointer) {
865: MEMORY_BASIC_INFORMATION buf;
866: register DWORD result = VirtualQuery(malloc(1), &buf, sizeof(buf));
867:
868: if (result != sizeof(buf)) {
869: ABORT("Weird VirtualQuery result");
870: }
871: malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
872: }
873: if (p == malloc_heap_pointer) return(TRUE);
874: # endif
875: for (i = 0; i < GC_n_heap_bases; i++) {
876: if (GC_heap_bases[i] == p) return(TRUE);
877: }
878: return(FALSE);
879: }
880:
881: void GC_register_root_section(ptr_t static_root)
882: {
883: MEMORY_BASIC_INFORMATION buf;
884: SYSTEM_INFO sysinfo;
885: DWORD result;
886: DWORD protect;
887: LPVOID p;
888: char * base;
889: char * limit, * new_limit;
890:
891: if (!GC_win32s) return;
892: p = base = limit = GC_least_described_address(static_root);
893: GetSystemInfo(&sysinfo);
894: while (p < sysinfo.lpMaximumApplicationAddress) {
895: result = VirtualQuery(p, &buf, sizeof(buf));
896: if (result != sizeof(buf) || buf.AllocationBase == 0
897: || GC_is_heap_base(buf.AllocationBase)) break;
898: new_limit = (char *)p + buf.RegionSize;
899: protect = buf.Protect;
900: if (buf.State == MEM_COMMIT
901: && is_writable(protect)) {
902: if ((char *)p == limit) {
903: limit = new_limit;
904: } else {
905: if (base != limit) GC_add_roots_inner(base, limit, FALSE);
906: base = p;
907: limit = new_limit;
908: }
909: }
910: if (p > (LPVOID)new_limit /* overflow */) break;
911: p = (LPVOID)new_limit;
912: }
913: if (base != limit) GC_add_roots_inner(base, limit, FALSE);
914: }
915:
916: void GC_register_data_segments()
917: {
918: static char dummy;
919:
920: GC_register_root_section((ptr_t)(&dummy));
921: }
922: # else
923: # ifdef AMIGA
924:
925: void GC_register_data_segments()
926: {
927: struct Process *proc;
928: struct CommandLineInterface *cli;
929: BPTR myseglist;
930: ULONG *data;
931:
932: int num;
933:
934:
935: # ifdef __GNUC__
936: ULONG dataSegSize;
937: GC_bool found_segment = FALSE;
938: extern char __data_size[];
939:
940: dataSegSize=__data_size+8;
 941: /* Can't find the location of __data_size, because
 942: it's possible that it is inside the segment. */
943:
944: # endif
945:
946: proc= (struct Process*)SysBase->ThisTask;
947:
948: /* Reference: Amiga Guru Book Pages: 538ff,565,573
949: and XOper.asm */
950: if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS) {
951: if (proc->pr_CLI == NULL) {
952: myseglist = proc->pr_SegList;
953: } else {
954: /* ProcLoaded 'Loaded as a command: '*/
955: cli = BADDR(proc->pr_CLI);
956: myseglist = cli->cli_Module;
957: }
958: } else {
959: ABORT("Not a Process.");
960: }
961:
962: if (myseglist == NULL) {
963: ABORT("Arrrgh.. can't find segments, aborting");
964: }
965:
966: /* xoper hunks Shell Process */
967:
968: num=0;
969: for (data = (ULONG *)BADDR(myseglist); data != NULL;
970: data = (ULONG *)BADDR(data[0])) {
971: if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
972: ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
973: # ifdef __GNUC__
974: if (dataSegSize == data[-1]) {
975: found_segment = TRUE;
976: }
977: # endif
978: GC_add_roots_inner((char *)&data[1],
979: ((char *)&data[1]) + data[-1], FALSE);
980: }
981: ++num;
982: } /* for */
983: # ifdef __GNUC__
984: if (!found_segment) {
985: ABORT("Can`t find correct Segments.\nSolution: Use an newer version of ixemul.library");
986: }
987: # endif
988: }
989:
990: #if 0 /* old version */
991: void GC_register_data_segments()
992: {
993: extern struct WBStartup *_WBenchMsg;
994: struct Process *proc;
995: struct CommandLineInterface *cli;
996: BPTR myseglist;
997: ULONG *data;
998:
999: if ( _WBenchMsg != 0 ) {
1000: if ((myseglist = _WBenchMsg->sm_Segment) == 0) {
1001: GC_err_puts("No seglist from workbench\n");
1002: return;
1003: }
1004: } else {
1005: if ((proc = (struct Process *)FindTask(0)) == 0) {
1006: GC_err_puts("Cannot find process structure\n");
1007: return;
1008: }
1009: if ((cli = BADDR(proc->pr_CLI)) == 0) {
1010: GC_err_puts("No CLI\n");
1011: return;
1012: }
1013: if ((myseglist = cli->cli_Module) == 0) {
1014: GC_err_puts("No seglist from CLI\n");
1015: return;
1016: }
1017: }
1018:
1019: for (data = (ULONG *)BADDR(myseglist); data != 0;
1020: data = (ULONG *)BADDR(data[0])) {
1021: # ifdef AMIGA_SKIP_SEG
1022: if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
1023: ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
1024: # else
1025: {
1026: # endif /* AMIGA_SKIP_SEG */
1027: GC_add_roots_inner((char *)&data[1],
1028: ((char *)&data[1]) + data[-1], FALSE);
1029: }
1030: }
1031: }
1032: #endif /* old version */
1033:
1034:
1035: # else
1036:
1037: # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
1038: char * GC_SysVGetDataStart(max_page_size, etext_addr)
1039: int max_page_size;
1040: int * etext_addr;
1041: {
1042: word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1043: & ~(sizeof(word) - 1);
1044: /* etext rounded to word boundary */
1045: word next_page = ((text_end + (word)max_page_size - 1)
1046: & ~((word)max_page_size - 1));
1047: word page_offset = (text_end & ((word)max_page_size - 1));
1048: VOLATILE char * result = (char *)(next_page + page_offset);
 1049: /* Note that this isn't equivalent to just adding max_page_size */
 1050: /* to &etext; the two differ when &etext is at a page boundary   */
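    /* Example: with max_page_size == 0x1000, etext_addr == 0x10234    */
    /* gives text_end == 0x10234, next_page == 0x11000, page_offset == */
    /* 0x234, so result == 0x11234 (== etext + max_page_size); but     */
    /* etext_addr == 0x10000 gives result == 0x10000, not 0x11000.     */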
1051:
1052: GC_setup_temporary_fault_handler();
1053: if (setjmp(GC_jmp_buf) == 0) {
1054: /* Try writing to the address. */
1055: *result = *result;
1056: GC_reset_fault_handler();
1057: } else {
1058: GC_reset_fault_handler();
1059: /* We got here via a longjmp. The address is not readable. */
1060: /* This is known to happen under Solaris 2.4 + gcc, which place */
1061: /* string constants in the text segment, but after etext. */
1062: /* Use plan B. Note that we now know there is a gap between */
1063: /* text and data segments, so plan A bought us something. */
1064: result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
1065: }
1066: return((char *)result);
1067: }
1068: # endif
1069:
1070:
1071: void GC_register_data_segments()
1072: {
1073: # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1074: && !defined(MACOSX)
1075: # if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
1076: /* As of Solaris 2.3, the Solaris threads implementation */
1077: /* allocates the data structure for the initial thread with */
1078: /* sbrk at process startup. It needs to be scanned, so that */
1079: /* we don't lose some malloc allocated data structures */
1080: /* hanging from it. We're on thin ice here ... */
1081: extern caddr_t sbrk();
1082:
1083: GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1084: # else
1085: GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
1086: # endif
1087: # endif
1088: # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1089: GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
1090: # endif
1091: # if defined(MACOS)
1092: {
1093: # if defined(THINK_C)
1094: extern void* GC_MacGetDataStart(void);
1095: /* globals begin above stack and end at a5. */
1096: GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1097: (ptr_t)LMGetCurrentA5(), FALSE);
1098: # else
1099: # if defined(__MWERKS__)
1100: # if !__POWERPC__
1101: extern void* GC_MacGetDataStart(void);
1102: /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1103: # if __option(far_data)
1104: extern void* GC_MacGetDataEnd(void);
1105: # endif
1106: /* globals begin above stack and end at a5. */
1107: GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1108: (ptr_t)LMGetCurrentA5(), FALSE);
1109: /* MATTHEW: Handle Far Globals */
1110: # if __option(far_data)
 1111: /* Far globals follow the QD globals: */
1112: GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1113: (ptr_t)GC_MacGetDataEnd(), FALSE);
1114: # endif
1115: # else
1116: extern char __data_start__[], __data_end__[];
1117: GC_add_roots_inner((ptr_t)&__data_start__,
1118: (ptr_t)&__data_end__, FALSE);
1119: # endif /* __POWERPC__ */
1120: # endif /* __MWERKS__ */
1121: # endif /* !THINK_C */
1122: }
1123: # endif /* MACOS */
1124:
1125: /* Dynamic libraries are added at every collection, since they may */
1126: /* change. */
1127: }
1128:
1129: # endif /* ! AMIGA */
1130: # endif /* ! MSWIN32 */
1131: # endif /* ! OS2 */
1132:
1133: /*
1134: * Auxiliary routines for obtaining memory from OS.
1135: */
1136:
1137: # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1138: && !defined(MSWIN32) && !defined(MACOS) && !defined(DOS4GW)
1139:
1140: # ifdef SUNOS4
1141: extern caddr_t sbrk();
1142: # endif
1143: # ifdef __STDC__
1144: # define SBRK_ARG_T ptrdiff_t
1145: # else
1146: # define SBRK_ARG_T int
1147: # endif
1148:
1149: # ifdef RS6000
1150: /* The compiler seems to generate speculative reads one past the end of */
1151: /* an allocated object. Hence we need to make sure that the page */
1152: /* following the last heap page is also mapped. */
1153: ptr_t GC_unix_get_mem(bytes)
1154: word bytes;
1155: {
1156: caddr_t cur_brk = (caddr_t)sbrk(0);
1157: caddr_t result;
1158: SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1159: static caddr_t my_brk_val = 0;
1160:
1161: if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1162: if (lsbs != 0) {
1163: if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1164: }
1165: if (cur_brk == my_brk_val) {
1166: /* Use the extra block we allocated last time. */
1167: result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1168: if (result == (caddr_t)(-1)) return(0);
1169: result -= GC_page_size;
1170: } else {
1171: result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1172: if (result == (caddr_t)(-1)) return(0);
1173: }
1174: my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1175: return((ptr_t)result);
1176: }
1177:
1178: #else /* Not RS6000 */
1179:
1180: #if defined(USE_MMAP)
1181: /* Tested only under IRIX5 and Solaris 2 */
1182:
1183: #ifdef USE_MMAP_FIXED
1184: # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1185: /* Seems to yield better performance on Solaris 2, but can */
1186: /* be unreliable if something is already mapped at the address. */
1187: #else
1188: # define GC_MMAP_FLAGS MAP_PRIVATE
1189: #endif
1190:
1191: ptr_t GC_unix_get_mem(bytes)
1192: word bytes;
1193: {
1194: static GC_bool initialized = FALSE;
1195: static int fd;
1196: void *result;
1197: static ptr_t last_addr = HEAP_START;
1198:
1199: if (!initialized) {
1200: fd = open("/dev/zero", O_RDONLY);
1201: initialized = TRUE;
1202: }
1203: if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1204: result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1205: GC_MMAP_FLAGS, fd, 0/* offset */);
1206: if (result == MAP_FAILED) return(0);
1207: last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1208: last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1209: return((ptr_t)result);
1210: }
1211:
1212: #else /* Not RS6000, not USE_MMAP */
1213: ptr_t GC_unix_get_mem(bytes)
1214: word bytes;
1215: {
1216: ptr_t result;
1217: # ifdef IRIX5
1218: /* Bare sbrk isn't thread safe. Play by malloc rules. */
1219: /* The equivalent may be needed on other systems as well. */
1220: __LOCK_MALLOC();
1221: # endif
1222: {
1223: ptr_t cur_brk = (ptr_t)sbrk(0);
1224: SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1225:
1226: if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1227: if (lsbs != 0) {
1228: if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1229: }
1230: result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1231: if (result == (ptr_t)(-1)) result = 0;
1232: }
1233: # ifdef IRIX5
1234: __UNLOCK_MALLOC();
1235: # endif
1236: return(result);
1237: }
1238:
1239: #endif /* Not USE_MMAP */
1240: #endif /* Not RS6000 */
1241:
1242: # endif /* UN*X */
1243:
1244: # ifdef OS2
1245:
1246: void * os2_alloc(size_t bytes)
1247: {
1248: void * result;
1249:
1250: if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1251: PAG_WRITE | PAG_COMMIT)
1252: != NO_ERROR) {
1253: return(0);
1254: }
1255: if (result == 0) return(os2_alloc(bytes));
1256: return(result);
1257: }
1258:
1259: # endif /* OS2 */
1260:
1261:
1262: # ifdef MSWIN32
1263: word GC_n_heap_bases = 0;
1264:
1265: ptr_t GC_win32_get_mem(bytes)
1266: word bytes;
1267: {
1268: ptr_t result;
1269:
1270: if (GC_win32s) {
1271: /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1272: /* There are also unconfirmed rumors of other */
1273: /* problems, so we dodge the issue. */
1274: result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1275: result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1276: } else {
1277: result = (ptr_t) VirtualAlloc(NULL, bytes,
1278: MEM_COMMIT | MEM_RESERVE,
1279: PAGE_EXECUTE_READWRITE);
1280: }
1281: if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1282: /* If I read the documentation correctly, this can */
1283: /* only happen if HBLKSIZE > 64k or not a power of 2. */
1284: if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1285: GC_heap_bases[GC_n_heap_bases++] = result;
1286: return(result);
1287: }
1288:
1289: void GC_win32_free_heap ()
1290: {
1291: if (GC_win32s) {
1292: while (GC_n_heap_bases > 0) {
1293: GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1294: GC_heap_bases[GC_n_heap_bases] = 0;
1295: }
1296: }
1297: }
1298:
1299:
1300: # endif
1301:
1302: #ifdef USE_MUNMAP
1303:
1304: /* For now, this only works on some Unix-like systems. If you */
1305: /* have something else, don't define USE_MUNMAP. */
1306: /* We assume ANSI C to support this feature. */
1307: #include <unistd.h>
1308: #include <sys/mman.h>
1309: #include <sys/stat.h>
1310: #include <sys/types.h>
1311: #include <fcntl.h>
1312:
1313: /* Compute a page aligned starting address for the unmap */
1314: /* operation on a block of size bytes starting at start. */
1315: /* Return 0 if the block is too small to make this feasible. */
1316: ptr_t GC_unmap_start(ptr_t start, word bytes)
1317: {
1318: ptr_t result = start;
1319: /* Round start to next page boundary. */
1320: result += GC_page_size - 1;
1321: result = (ptr_t)((word)result & ~(GC_page_size - 1));
1322: if (result + GC_page_size > start + bytes) return 0;
1323: return result;
1324: }
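/* Example: with GC_page_size == 0x1000, GC_unmap_start(0x12345,	*/
/* 0x3000) rounds the start up to 0x13000; since 0x13000 + 0x1000	*/
/* does not pass the end of the block (0x15345), 0x13000 is returned.	*/
/* A block that does not contain a whole aligned page yields 0.	*/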
1325:
1326: /* Compute end address for an unmap operation on the indicated */
1327: /* block. */
1328: ptr_t GC_unmap_end(ptr_t start, word bytes)
1329: {
1330: ptr_t end_addr = start + bytes;
1331: end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1332: return end_addr;
1333: }
1334:
1335: /* We assume that GC_remap is called on exactly the same range */
1336: /* as a previous call to GC_unmap. It is safe to consistently */
1337: /* round the endpoints in both places. */
1338: void GC_unmap(ptr_t start, word bytes)
1339: {
1340: ptr_t start_addr = GC_unmap_start(start, bytes);
1341: ptr_t end_addr = GC_unmap_end(start, bytes);
1342: word len = end_addr - start_addr;
1343: if (0 == start_addr) return;
1344: if (munmap(start_addr, len) != 0) ABORT("munmap failed");
1345: GC_unmapped_bytes += len;
1346: }
1347:
1348:
1349: void GC_remap(ptr_t start, word bytes)
1350: {
1351: static int zero_descr = -1;
1352: ptr_t start_addr = GC_unmap_start(start, bytes);
1353: ptr_t end_addr = GC_unmap_end(start, bytes);
1354: word len = end_addr - start_addr;
1355: ptr_t result;
1356:
1357: if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
1358: if (0 == start_addr) return;
1359: result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1360: MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
1361: if (result != start_addr) {
1362: ABORT("mmap remapping failed");
1363: }
1364: GC_unmapped_bytes -= len;
1365: }
1366:
1367: /* Two adjacent blocks have already been unmapped and are about to */
1368: /* be merged. Unmap the whole block. This typically requires */
1369: /* that we unmap a small section in the middle that was not previously */
1370: /* unmapped due to alignment constraints. */
1371: void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1372: {
1373: ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1374: ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1375: ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1376: ptr_t end2_addr = GC_unmap_end(start2, bytes2);
1377: ptr_t start_addr = end1_addr;
1378: ptr_t end_addr = start2_addr;
1379: word len;
1380: GC_ASSERT(start1 + bytes1 == start2);
1381: if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1382: if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1383: if (0 == start_addr) return;
1384: len = end_addr - start_addr;
1385: if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1386: GC_unmapped_bytes += len;
1387: }
1388:
1389: #endif /* USE_MUNMAP */
1390:
1391: /* Routine for pushing any additional roots. In THREADS */
1392: /* environment, this is also responsible for marking from */
1393: /* thread stacks. In the SRC_M3 case, it also handles */
1394: /* global variables. */
1395: #ifndef THREADS
1396: void (*GC_push_other_roots)() = 0;
1397: #else /* THREADS */
1398:
1399: # ifdef PCR
1400: PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1401: {
1402: struct PCR_ThCtl_TInfoRep info;
1403: PCR_ERes result;
1404:
1405: info.ti_stkLow = info.ti_stkHi = 0;
1406: result = PCR_ThCtl_GetInfo(t, &info);
1407: GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1408: return(result);
1409: }
1410:
1411: /* Push the contents of an old object. We treat this as stack */
 1412: /* data only because that makes it robust against mark stack */
1413: /* overflow. */
1414: PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1415: {
1416: GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1417: return(PCR_ERes_okay);
1418: }
1419:
1420:
1421: void GC_default_push_other_roots()
1422: {
1423: /* Traverse data allocated by previous memory managers. */
1424: {
1425: extern struct PCR_MM_ProcsRep * GC_old_allocator;
1426:
1427: if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1428: GC_push_old_obj, 0)
1429: != PCR_ERes_okay) {
1430: ABORT("Old object enumeration failed");
1431: }
1432: }
1433: /* Traverse all thread stacks. */
1434: if (PCR_ERes_IsErr(
1435: PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1436: || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1437: ABORT("Thread stack marking failed\n");
1438: }
1439: }
1440:
1441: # endif /* PCR */
1442:
1443: # ifdef SRC_M3
1444:
1445: # ifdef ALL_INTERIOR_POINTERS
1446: --> misconfigured
1447: # endif
1448:
1449:
1450: extern void ThreadF__ProcessStacks();
1451:
1452: void GC_push_thread_stack(start, stop)
1453: word start, stop;
1454: {
1455: GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
1456: }
1457:
1458: /* Push routine with M3 specific calling convention. */
1459: GC_m3_push_root(dummy1, p, dummy2, dummy3)
1460: word *p;
1461: ptr_t dummy1, dummy2;
1462: int dummy3;
1463: {
1464: word q = *p;
1465:
1466: if ((ptr_t)(q) >= GC_least_plausible_heap_addr
1467: && (ptr_t)(q) < GC_greatest_plausible_heap_addr) {
1468: GC_push_one_checked(q,FALSE);
1469: }
1470: }
1471:
1472: /* M3 set equivalent to RTHeap.TracedRefTypes */
1473: typedef struct { int elts[1]; } RefTypeSet;
1474: RefTypeSet GC_TracedRefTypes = {{0x1}};
1475:
1476: /* From finalize.c */
1477: extern void GC_push_finalizer_structures();
1478:
1479: /* From stubborn.c: */
1480: # ifdef STUBBORN_ALLOC
1481: extern GC_PTR * GC_changing_list_start;
1482: # endif
1483:
1484:
1485: void GC_default_push_other_roots()
1486: {
1487: /* Use the M3 provided routine for finding static roots. */
1488: /* This is a bit dubious, since it presumes no C roots. */
1489: /* We handle the collector roots explicitly. */
1490: {
1491: # ifdef STUBBORN_ALLOC
1492: GC_push_one(GC_changing_list_start);
1493: # endif
1494: GC_push_finalizer_structures();
1495: RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
1496: }
1497: if (GC_words_allocd > 0) {
1498: ThreadF__ProcessStacks(GC_push_thread_stack);
1499: }
1500: /* Otherwise this isn't absolutely necessary, and we have */
1501: /* startup ordering problems. */
1502: }
1503:
1504: # endif /* SRC_M3 */
1505:
1506: # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
1507: || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
1.2 ! noro 1508: || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
1.1 noro 1509:
1510: extern void GC_push_all_stacks();
1511:
1512: void GC_default_push_other_roots()
1513: {
1514: GC_push_all_stacks();
1515: }
1516:
1517: # endif /* SOLARIS_THREADS || ... */
1518:
1519: void (*GC_push_other_roots)() = GC_default_push_other_roots;
1520:
1521: #endif
1522:
1523: /*
1524: * Routines for accessing dirty bits on virtual pages.
 1525:  * We plan to eventually implement four strategies for doing so:
1526: * DEFAULT_VDB: A simple dummy implementation that treats every page
1527: * as possibly dirty. This makes incremental collection
1528: * useless, but the implementation is still correct.
 1529:  * PCR_VDB:	Use PCR's virtual dirty bit facility.
1530: * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1531: * works under some SVR4 variants. Even then, it may be
1532: * too slow to be entirely satisfactory. Requires reading
1533: * dirty bits for entire address space. Implementations tend
1534: * to assume that the client is a (slow) debugger.
1535: * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1536: * dirtied pages. The implementation (and implementability)
1537: * is highly system dependent. This usually fails when system
1538: * calls write to a protected page. We prevent the read system
 1539:  * call from doing so. It is the client's responsibility to
1540: * make sure that other system calls are similarly protected
1541: * or write only to the stack.
1542: */
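#if 0
/* Illustrative sketch, not part of the original file: how a collector	*/
/* typically drives the interface described above, using the routines	*/
/* defined below together with the GC_heap_sects table from gc_priv.h.	*/
/* The re-marking step itself is only indicated by a comment.		*/
static void GC_example_rescan_dirty()
{
    register word i;
    register struct hblk *h, *lim;

    GC_read_dirty();	/* Snapshot, and reset, the system's dirty bits. */
    for (i = 0; i < GC_n_heap_sects; i++) {
        h = (struct hblk *)GC_heap_sects[i].hs_start;
        lim = (struct hblk *)(GC_heap_sects[i].hs_start
                              + GC_heap_sects[i].hs_bytes);
        for (; h < lim; h++) {
            if (GC_page_was_dirty(h)) {
                /* h may have acquired new pointers since the last	*/
                /* collection and must be re-marked.			*/
            }
        }
    }
}
#endif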
1543:
1544: GC_bool GC_dirty_maintained = FALSE;
1545:
1546: # ifdef DEFAULT_VDB
1547:
1548: /* All of the following assume the allocation lock is held, and */
1549: /* signals are disabled. */
1550:
1551: /* The client asserts that unallocated pages in the heap are never */
1552: /* written. */
1553:
1554: /* Initialize virtual dirty bit implementation. */
1555: void GC_dirty_init()
1556: {
1557: GC_dirty_maintained = TRUE;
1558: }
1559:
1560: /* Retrieve system dirty bits for heap to a local buffer. */
 1561: /* Restore the system's notion of which pages are dirty. */
1562: void GC_read_dirty()
1563: {}
1564:
1565: /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1566: /* If the actual page size is different, this returns TRUE if any */
1567: /* of the pages overlapping h are dirty. This routine may err on the */
1568: /* side of labelling pages as dirty (and this implementation does). */
1569: /*ARGSUSED*/
1570: GC_bool GC_page_was_dirty(h)
1571: struct hblk *h;
1572: {
1573: return(TRUE);
1574: }
1575:
1576: /*
1577: * The following two routines are typically less crucial. They matter
1578: * most with large dynamic libraries, or if we can't accurately identify
1579: * stacks, e.g. under Solaris 2.X. Otherwise the following default
1580: * versions are adequate.
1581: */
1582:
1583: /* Could any valid GC heap pointer ever have been written to this page? */
1584: /*ARGSUSED*/
1585: GC_bool GC_page_was_ever_dirty(h)
1586: struct hblk *h;
1587: {
1588: return(TRUE);
1589: }
1590:
1591: /* Reset the n pages starting at h to "was never dirty" status. */
1592: void GC_is_fresh(h, n)
1593: struct hblk *h;
1594: word n;
1595: {
1596: }
1597:
1598: /* A call hints that h is about to be written. */
1599: /* May speed up some dirty bit implementations. */
1600: /*ARGSUSED*/
1601: void GC_write_hint(h)
1602: struct hblk *h;
1603: {
1604: }
1605:
1606: # endif /* DEFAULT_VDB */
1607:
1608:
1609: # ifdef MPROTECT_VDB
1610:
1611: /*
1612: * See DEFAULT_VDB for interface descriptions.
1613: */
1614:
1615: /*
1616: * This implementation maintains dirty bits itself by catching write
1617: * faults and keeping track of them. We assume nobody else catches
1618: * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1619: * except as a result of a read system call. This means clients must
1620: * either ensure that system calls do not touch the heap, or must
1621: * provide their own wrappers analogous to the one for read.
1622: * We assume the page size is a multiple of HBLKSIZE.
1623: * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1624: * tried to use portable code where easily possible. It is known
1625: * not to work under a number of other systems.
1626: */
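#if 0
/* Illustrative sketch, not part of the original file, of the kind of	*/
/* wrapper the comment above asks clients to supply for system calls	*/
/* that write to the heap.  Touching every page of the buffer first	*/
/* makes any write fault happen here, where GC_write_fault_handler	*/
/* can unprotect the page, instead of inside the kernel.  The name	*/
/* my_wrapped_read is hypothetical.					*/
ssize_t my_wrapped_read(int fd, void *buf, size_t nbyte)
{
    register VOLATILE char *p = (VOLATILE char *)buf;
    register VOLATILE char *lim = p + nbyte;

    for (; p < lim; p += GC_page_size) *p = *p;
    				/* may fault; handler unprotects page	*/
    if (nbyte > 0) { p = lim - 1; *p = *p; }
    				/* be sure the last page is touched too	*/
    return(read(fd, buf, nbyte));
}
#endif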
1627:
1628: # ifndef MSWIN32
1629:
1630: # include <sys/mman.h>
1631: # include <signal.h>
1632: # include <sys/syscall.h>
1633:
1634: # define PROTECT(addr, len) \
1.2 ! noro 1635: if (mprotect((caddr_t)(addr), (size_t)(len), \
1.1 noro 1636: PROT_READ | OPT_PROT_EXEC) < 0) { \
1637: ABORT("mprotect failed"); \
1638: }
1639: # define UNPROTECT(addr, len) \
1.2 ! noro 1640: if (mprotect((caddr_t)(addr), (size_t)(len), \
1.1 noro 1641: PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1642: ABORT("un-mprotect failed"); \
1643: }
1644:
1645: # else
1646:
1647: # include <signal.h>
1648:
1649: static DWORD protect_junk;
1650: # define PROTECT(addr, len) \
1651: if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1652: &protect_junk)) { \
1653: DWORD last_error = GetLastError(); \
1654: GC_printf1("Last error code: %lx\n", last_error); \
1655: ABORT("VirtualProtect failed"); \
1656: }
1657: # define UNPROTECT(addr, len) \
1658: if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1659: &protect_junk)) { \
1660: ABORT("un-VirtualProtect failed"); \
1661: }
1662:
1663: # endif
1664:
1665: #if defined(SUNOS4) || defined(FREEBSD)
1666: typedef void (* SIG_PF)();
1667: #endif
1668: #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX)
1.2 ! noro 1669: # ifdef __STDC__
1.1 noro 1670: typedef void (* SIG_PF)(int);
1.2 ! noro 1671: # else
! 1672: typedef void (* SIG_PF)();
! 1673: # endif
1.1 noro 1674: #endif
1675: #if defined(MSWIN32)
1676: typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
1677: # undef SIG_DFL
1678: # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1679: #endif
1680:
1681: #if defined(IRIX5) || defined(OSF1)
1682: typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1683: #endif
1684: #if defined(SUNOS5SIGS)
1.2 ! noro 1685: # ifdef HPUX
! 1686: # define SIGINFO __siginfo
! 1687: # else
! 1688: # define SIGINFO siginfo
! 1689: # endif
! 1690: # ifdef __STDC__
! 1691: typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
! 1692: # else
! 1693: typedef void (* REAL_SIG_PF)();
! 1694: # endif
1.1 noro 1695: #endif
1696: #if defined(LINUX)
1697: # include <linux/version.h>
1.2 ! noro 1698: # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1.1 noro 1699: typedef struct sigcontext s_c;
1700: # else
1701: typedef struct sigcontext_struct s_c;
1702: # endif
1.2 ! noro 1703: # if defined(ALPHA) || defined(M68K)
! 1704: typedef void (* REAL_SIG_PF)(int, int, s_c *);
! 1705: # else
! 1706: # if defined(IA64)
! 1707: typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
! 1708: # else
! 1709: typedef void (* REAL_SIG_PF)(int, s_c);
! 1710: # endif
! 1711: # endif
1.1 noro 1712: # ifdef ALPHA
1713: /* Retrieve fault address from sigcontext structure by decoding */
1714: /* instruction. */
1715: char * get_fault_addr(s_c *sc) {
1716: unsigned instr;
1717: word faultaddr;
1718:
1719: instr = *((unsigned *)(sc->sc_pc));
1720: faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
1721: faultaddr += (word) (((int)instr << 16) >> 16);
1722: return (char *)faultaddr;
1723: }
 1724: # endif /* ALPHA */
1725: # endif
1726:
1727: SIG_PF GC_old_bus_handler;
1728: SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
1729:
1730: /*ARGSUSED*/
1731: # if defined (SUNOS4) || defined(FREEBSD)
1732: void GC_write_fault_handler(sig, code, scp, addr)
1733: int sig, code;
1734: struct sigcontext *scp;
1735: char * addr;
1736: # ifdef SUNOS4
1737: # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1738: # define CODE_OK (FC_CODE(code) == FC_PROT \
1739: || (FC_CODE(code) == FC_OBJERR \
1740: && FC_ERRNO(code) == FC_PROT))
1741: # endif
1742: # ifdef FREEBSD
1743: # define SIG_OK (sig == SIGBUS)
1744: # define CODE_OK (code == BUS_PAGE_FAULT)
1745: # endif
1746: # endif
1747: # if defined(IRIX5) || defined(OSF1)
1748: # include <errno.h>
1749: void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
1750: # define SIG_OK (sig == SIGSEGV)
1751: # ifdef OSF1
1752: # define CODE_OK (code == 2 /* experimentally determined */)
1753: # endif
1754: # ifdef IRIX5
1755: # define CODE_OK (code == EACCES)
1756: # endif
1757: # endif
1758: # if defined(LINUX)
1.2 ! noro 1759: # if defined(ALPHA) || defined(M68K)
1.1 noro 1760: void GC_write_fault_handler(int sig, int code, s_c * sc)
1761: # else
1.2 ! noro 1762: # if defined(IA64)
! 1763: void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
! 1764: # else
! 1765: void GC_write_fault_handler(int sig, s_c sc)
! 1766: # endif
1.1 noro 1767: # endif
1768: # define SIG_OK (sig == SIGSEGV)
1769: # define CODE_OK TRUE
1.2 ! noro 1770: /* Empirically c.trapno == 14, on IA32, but is that useful? */
! 1771: /* Should probably consider alignment issues on other */
! 1772: /* architectures. */
1.1 noro 1773: # endif
1774: # if defined(SUNOS5SIGS)
1.2 ! noro 1775: # ifdef __STDC__
! 1776: void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
! 1777: # else
! 1778: void GC_write_fault_handler(sig, scp, context)
! 1779: int sig;
! 1780: struct SIGINFO *scp;
! 1781: void * context;
! 1782: # endif
! 1783: # ifdef HPUX
! 1784: # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
! 1785: # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
! 1786: || (scp -> si_code == BUS_ADRERR) \
! 1787: || (scp -> si_code == BUS_UNKNOWN) \
! 1788: || (scp -> si_code == SEGV_UNKNOWN) \
! 1789: || (scp -> si_code == BUS_OBJERR)
! 1790: # else
! 1791: # define SIG_OK (sig == SIGSEGV)
! 1792: # define CODE_OK (scp -> si_code == SEGV_ACCERR)
! 1793: # endif
1.1 noro 1794: # endif
1795: # if defined(MSWIN32)
1796: LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
1797: # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
1798: EXCEPTION_ACCESS_VIOLATION)
1799: # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
1800: /* Write fault */
1801: # endif
1802: {
1803: register unsigned i;
1804: # ifdef IRIX5
1805: char * addr = (char *) (size_t) (scp -> sc_badvaddr);
1806: # endif
1807: # if defined(OSF1) && defined(ALPHA)
1808: char * addr = (char *) (scp -> sc_traparg_a0);
1809: # endif
1810: # ifdef SUNOS5SIGS
1811: char * addr = (char *) (scp -> si_addr);
1812: # endif
1813: # ifdef LINUX
1814: # ifdef I386
1815: char * addr = (char *) (sc.cr2);
1816: # else
1817: # if defined(M68K)
1818: char * addr = NULL;
1819:
1820: struct sigcontext *scp = (struct sigcontext *)(&sc);
1821:
1822: int format = (scp->sc_formatvec >> 12) & 0xf;
1823: unsigned long *framedata = (unsigned long *)(scp + 1);
1824: unsigned long ea;
1825:
1826: if (format == 0xa || format == 0xb) {
1827: /* 68020/030 */
1828: ea = framedata[2];
1829: } else if (format == 7) {
1830: /* 68040 */
1831: ea = framedata[3];
1832: } else if (format == 4) {
1833: /* 68060 */
1834: ea = framedata[0];
1835: if (framedata[1] & 0x08000000) {
1836: /* correct addr on misaligned access */
1837: ea = (ea+4095)&(~4095);
1838: }
1839: }
1840: addr = (char *)ea;
1841: # else
1842: # ifdef ALPHA
1843: char * addr = get_fault_addr(sc);
1844: # else
1.2 ! noro 1845: # ifdef IA64
! 1846: char * addr = si -> si_addr;
! 1847: # else
! 1848: # if defined(POWERPC)
! 1849: char * addr = (char *) (sc.regs->dar);
! 1850: # else
1.1 noro 1851: --> architecture not supported
1.2 ! noro 1852: # endif
! 1853: # endif
1.1 noro 1854: # endif
1855: # endif
1856: # endif
1857: # endif
1858: # if defined(MSWIN32)
1859: char * addr = (char *) (exc_info -> ExceptionRecord
1860: -> ExceptionInformation[1]);
1861: # define sig SIGSEGV
1862: # endif
1863:
1864: if (SIG_OK && CODE_OK) {
1865: register struct hblk * h =
1866: (struct hblk *)((word)addr & ~(GC_page_size-1));
1867: GC_bool in_allocd_block;
1868:
1869: # ifdef SUNOS5SIGS
1870: /* The address is only known to be within the correct physical page. */
1871: in_allocd_block = FALSE;
1872: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1873: if (HDR(h+i) != 0) {
1874: in_allocd_block = TRUE;
1875: }
1876: }
1877: # else
1878: in_allocd_block = (HDR(addr) != 0);
1879: # endif
1880: if (!in_allocd_block) {
1881: /* Heap blocks now begin and end on page boundaries */
1882: SIG_PF old_handler;
1883:
1884: if (sig == SIGSEGV) {
1885: old_handler = GC_old_segv_handler;
1886: } else {
1887: old_handler = GC_old_bus_handler;
1888: }
1889: if (old_handler == SIG_DFL) {
1890: # ifndef MSWIN32
1891: GC_err_printf1("Segfault at 0x%lx\n", addr);
1892: ABORT("Unexpected bus error or segmentation fault");
1893: # else
1894: return(EXCEPTION_CONTINUE_SEARCH);
1895: # endif
1896: } else {
1897: # if defined (SUNOS4) || defined(FREEBSD)
1898: (*old_handler) (sig, code, scp, addr);
1899: return;
1900: # endif
1901: # if defined (SUNOS5SIGS)
1902: (*(REAL_SIG_PF)old_handler) (sig, scp, context);
1903: return;
1904: # endif
1905: # if defined (LINUX)
1.2 ! noro 1906: # if defined(ALPHA) || defined(M68K)
1.1 noro 1907: (*(REAL_SIG_PF)old_handler) (sig, code, sc);
1908: # else
1.2 ! noro 1909: # if defined(IA64)
! 1910: (*(REAL_SIG_PF)old_handler) (sig, si, scp);
! 1911: # else
1.1 noro 1912: (*(REAL_SIG_PF)old_handler) (sig, sc);
1.2 ! noro 1913: # endif
1.1 noro 1914: # endif
1915: return;
1916: # endif
1917: # if defined (IRIX5) || defined(OSF1)
1918: (*(REAL_SIG_PF)old_handler) (sig, code, scp);
1919: return;
1920: # endif
1921: # ifdef MSWIN32
1922: return((*old_handler)(exc_info));
1923: # endif
1924: }
1925: }
1926: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1927: register int index = PHT_HASH(h+i);
1928:
1929: set_pht_entry_from_index(GC_dirty_pages, index);
1930: }
1931: UNPROTECT(h, GC_page_size);
1932: # if defined(OSF1) || defined(LINUX)
1933: /* These reset the signal handler each time by default. */
1934: signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
1935: # endif
1936: /* The write may not take place before dirty bits are read. */
1937: /* But then we'll fault again ... */
1938: # ifdef MSWIN32
1939: return(EXCEPTION_CONTINUE_EXECUTION);
1940: # else
1941: return;
1942: # endif
1943: }
1944: #ifdef MSWIN32
1945: return EXCEPTION_CONTINUE_SEARCH;
1946: #else
1947: GC_err_printf1("Segfault at 0x%lx\n", addr);
1948: ABORT("Unexpected bus error or segmentation fault");
1949: #endif
1950: }
1951:
1952: /*
1953: * We hold the allocation lock. We expect block h to be written
1954: * shortly.
1955: */
1956: void GC_write_hint(h)
1957: struct hblk *h;
1958: {
1959: register struct hblk * h_trunc;
1960: register unsigned i;
1961: register GC_bool found_clean;
1962:
1963: if (!GC_dirty_maintained) return;
1964: h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
1965: found_clean = FALSE;
1966: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1967: register int index = PHT_HASH(h_trunc+i);
1968:
1969: if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
1970: found_clean = TRUE;
1971: set_pht_entry_from_index(GC_dirty_pages, index);
1972: }
1973: }
1974: if (found_clean) {
1975: UNPROTECT(h_trunc, GC_page_size);
1976: }
1977: }
1978:
1979: void GC_dirty_init()
1980: {
1981: #if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
1982: struct sigaction act, oldact;
1983: # ifdef IRIX5
1984: act.sa_flags = SA_RESTART;
1985: act.sa_handler = GC_write_fault_handler;
1986: # else
1987: act.sa_flags = SA_RESTART | SA_SIGINFO;
1988: act.sa_sigaction = GC_write_fault_handler;
1989: # endif
1990: (void)sigemptyset(&act.sa_mask);
1991: #endif
1992: # ifdef PRINTSTATS
1993: GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
1994: # endif
1995: GC_dirty_maintained = TRUE;
1996: if (GC_page_size % HBLKSIZE != 0) {
1997: GC_err_printf0("Page size not multiple of HBLKSIZE\n");
1998: ABORT("Page size not multiple of HBLKSIZE");
1999: }
2000: # if defined(SUNOS4) || defined(FREEBSD)
2001: GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2002: if (GC_old_bus_handler == SIG_IGN) {
2003: GC_err_printf0("Previously ignored bus error!?");
2004: GC_old_bus_handler = SIG_DFL;
2005: }
2006: if (GC_old_bus_handler != SIG_DFL) {
2007: # ifdef PRINTSTATS
2008: GC_err_printf0("Replaced other SIGBUS handler\n");
2009: # endif
2010: }
2011: # endif
2012: # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
2013: GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2014: if (GC_old_segv_handler == SIG_IGN) {
2015: GC_err_printf0("Previously ignored segmentation violation!?");
2016: GC_old_segv_handler = SIG_DFL;
2017: }
2018: if (GC_old_segv_handler != SIG_DFL) {
2019: # ifdef PRINTSTATS
2020: GC_err_printf0("Replaced other SIGSEGV handler\n");
2021: # endif
2022: }
2023: # endif
2024: # if defined(SUNOS5SIGS) || defined(IRIX5)
1.2 ! noro 2025: # if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
1.1 noro 2026: sigaction(SIGSEGV, 0, &oldact);
2027: sigaction(SIGSEGV, &act, 0);
2028: # else
2029: sigaction(SIGSEGV, &act, &oldact);
2030: # endif
2031: # if defined(_sigargs)
2032: /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2033: /* sa_sigaction. */
2034: GC_old_segv_handler = oldact.sa_handler;
2035: # else /* Irix 6.x or SUNOS5SIGS */
2036: if (oldact.sa_flags & SA_SIGINFO) {
2037: GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2038: } else {
2039: GC_old_segv_handler = oldact.sa_handler;
2040: }
2041: # endif
2042: if (GC_old_segv_handler == SIG_IGN) {
2043: GC_err_printf0("Previously ignored segmentation violation!?");
2044: GC_old_segv_handler = SIG_DFL;
2045: }
2046: if (GC_old_segv_handler != SIG_DFL) {
2047: # ifdef PRINTSTATS
2048: GC_err_printf0("Replaced other SIGSEGV handler\n");
2049: # endif
2050: }
1.2 ! noro 2051: # ifdef HPUX
! 2052: sigaction(SIGBUS, &act, &oldact);
! 2053: GC_old_bus_handler = oldact.sa_handler;
! 2054: if (GC_old_bus_handler != SIG_DFL) {
! 2055: # ifdef PRINTSTATS
! 2056: GC_err_printf0("Replaced other SIGBUS handler\n");
! 2057: # endif
! 2058: }
! 2059: # endif
1.1 noro 2060: # endif
2061: # if defined(MSWIN32)
2062: GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2063: if (GC_old_segv_handler != NULL) {
2064: # ifdef PRINTSTATS
2065: GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2066: # endif
2067: } else {
2068: GC_old_segv_handler = SIG_DFL;
2069: }
2070: # endif
2071: }
2072:
2073:
2074:
2075: void GC_protect_heap()
2076: {
2077: ptr_t start;
2078: word len;
2079: unsigned i;
2080:
2081: for (i = 0; i < GC_n_heap_sects; i++) {
2082: start = GC_heap_sects[i].hs_start;
2083: len = GC_heap_sects[i].hs_bytes;
2084: PROTECT(start, len);
2085: }
2086: }
2087:
2088: /* We assume that either the world is stopped or it's OK to lose dirty */
2089: /* bits while this is happening (as in GC_enable_incremental). */
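/* GC_read_dirty copies the bits accumulated by the write fault handler */
/* into GC_grungy_pages, clears GC_dirty_pages, and reprotects the heap */
/* so that the next write to each page faults and is recorded again.    */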
2090: void GC_read_dirty()
2091: {
2092: BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2093: (sizeof GC_dirty_pages));
2094: BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2095: GC_protect_heap();
2096: }
2097:
2098: GC_bool GC_page_was_dirty(h)
2099: struct hblk * h;
2100: {
2101: register word index = PHT_HASH(h);
2102:
2103: return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2104: }
2105:
2106: /*
2107: * Acquiring the allocation lock here is dangerous, since this
2108: * can be called from within GC_call_with_alloc_lock, and the cord
2109: * package does so. On systems that allow nested lock acquisition, this
2110: * happens to work.
2111: * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2112: */
2113:
2114: void GC_begin_syscall()
2115: {
2116: if (!I_HOLD_LOCK()) LOCK();
2117: }
2118:
2119: void GC_end_syscall()
2120: {
2121: if (!I_HOLD_LOCK()) UNLOCK();
2122: }
2123:
2124: void GC_unprotect_range(addr, len)
2125: ptr_t addr;
2126: word len;
2127: {
2128: struct hblk * start_block;
2129: struct hblk * end_block;
2130: register struct hblk *h;
2131: ptr_t obj_start;
2132:
2133: if (!GC_incremental) return;
2134: obj_start = GC_base(addr);
2135: if (obj_start == 0) return;
2136: if (GC_base(addr + len - 1) != obj_start) {
2137: ABORT("GC_unprotect_range(range bigger than object)");
2138: }
2139: start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2140: end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
2141: end_block += GC_page_size/HBLKSIZE - 1;
2142: for (h = start_block; h <= end_block; h++) {
2143: register word index = PHT_HASH(h);
2144:
2145: set_pht_entry_from_index(GC_dirty_pages, index);
2146: }
2147: UNPROTECT(start_block,
2148: ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2149: }
2150:
2151: #ifndef MSWIN32
2152: /* Replacement for the UNIX read() system call. */
2153: /* Other calls that write to the heap */
2154: /* should be handled similarly. */
2155: # if defined(__STDC__) && !defined(SUNOS4)
2156: # include <unistd.h>
2157: ssize_t read(int fd, void *buf, size_t nbyte)
2158: # else
2159: # ifndef LINT
2160: int read(fd, buf, nbyte)
2161: # else
2162: int GC_read(fd, buf, nbyte)
2163: # endif
2164: int fd;
2165: char *buf;
2166: int nbyte;
2167: # endif
2168: {
2169: int result;
2170:
2171: GC_begin_syscall();
2172: GC_unprotect_range(buf, (word)nbyte);
2173: # ifdef IRIX5
2174: /* Indirect system call may not always be easily available. */
2175: /* We could call _read, but that would interfere with the */
2176: /* libpthread interception of read. */
2177: {
2178: struct iovec iov;
2179:
2180: iov.iov_base = buf;
2181: iov.iov_len = nbyte;
2182: result = readv(fd, &iov, 1);
2183: }
2184: # else
2185: result = syscall(SYS_read, fd, buf, nbyte);
2186: # endif
2187: GC_end_syscall();
2188: return(result);
2189: }
2190: #endif /* !MSWIN32 */
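/*
 * Illustrative sketch only (not part of the original source): any other
 * call that writes into the collected heap would be wrapped in the same
 * style as read() above, e.g. a hypothetical readv() replacement
 * (assuming <sys/uio.h> and SYS_readv are available):
 *
 *	ssize_t readv(int fd, const struct iovec *iov, int iovcnt)
 *	{
 *	    ssize_t result;
 *	    int i;
 *
 *	    GC_begin_syscall();
 *	    for (i = 0; i < iovcnt; i++) {
 *		GC_unprotect_range((ptr_t)(iov[i].iov_base),
 *				   (word)(iov[i].iov_len));
 *	    }
 *	    result = syscall(SYS_readv, fd, iov, iovcnt);
 *	    GC_end_syscall();
 *	    return(result);
 *	}
 *
 * As in the read() wrapper, GC_unprotect_range returns immediately for
 * buffers outside the collected heap and aborts if a buffer spans more
 * than one object.
 */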
2191:
2192: /*ARGSUSED*/
2193: GC_bool GC_page_was_ever_dirty(h)
2194: struct hblk *h;
2195: {
2196: return(TRUE);
2197: }
2198:
2199: /* Reset the n pages starting at h to "was never dirty" status. */
2200: /*ARGSUSED*/
2201: void GC_is_fresh(h, n)
2202: struct hblk *h;
2203: word n;
2204: {
2205: }
2206:
2207: # endif /* MPROTECT_VDB */
2208:
2209: # ifdef PROC_VDB
2210:
2211: /*
2212: * See DEFAULT_VDB for interface descriptions.
2213: */
2214:
2215: /*
2216: * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
2217: * from which we can read page modified bits. This facility is far from
2218: * optimal (e.g. we would like to get the info for only some of the
2219: * address space), but it avoids intercepting system calls.
2220: */
2221:
2222: #include <errno.h>
2223: #include <sys/types.h>
2224: #include <sys/signal.h>
2225: #include <sys/fault.h>
2226: #include <sys/syscall.h>
2227: #include <sys/procfs.h>
2228: #include <sys/stat.h>
2229: #include <fcntl.h>
2230:
2231: #define INITIAL_BUF_SZ 4096
2232: word GC_proc_buf_size = INITIAL_BUF_SZ;
2233: char *GC_proc_buf;
2234:
2235: #ifdef SOLARIS_THREADS
2236: /* We don't have exact sp values for threads. So we count on */
2237: /* occasionally declaring stack pages to be fresh. Thus we */
2238: /* need a real implementation of GC_is_fresh. We can't clear */
2239: /* entries in GC_written_pages, since that would declare all */
2240: /* pages with the given hash address to be fresh. */
2241: # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2242: struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
2243: /* Collisions are dropped. */
2244:
2245: # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2246: # define ADD_FRESH_PAGE(h) \
2247: GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2248: # define PAGE_IS_FRESH(h) \
2249: (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
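/* The cache is direct mapped: a later ADD_FRESH_PAGE whose block hashes */
/* to the same slot simply evicts the earlier entry, so PAGE_IS_FRESH    */
/* then reports the evicted block as not fresh.  That is only            */
/* conservative: the evicted page is again treated as potentially dirty. */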
2250: #endif
2251:
2252: /* Add all pages in pht2 to pht1 */
2253: void GC_or_pages(pht1, pht2)
2254: page_hash_table pht1, pht2;
2255: {
2256: register int i;
2257:
2258: for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2259: }
2260:
2261: int GC_proc_fd;
2262:
2263: void GC_dirty_init()
2264: {
2265: int fd;
2266: char buf[30];
2267:
2268: GC_dirty_maintained = TRUE;
2269: if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
2270: register int i;
2271:
2272: for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
2273: # ifdef PRINTSTATS
2274: GC_printf1("Allocated words:%lu:all pages may have been written\n",
2275: (unsigned long)
2276: (GC_words_allocd + GC_words_allocd_before_gc));
2277: # endif
2278: }
2279: sprintf(buf, "/proc/%d", getpid());
2280: fd = open(buf, O_RDONLY);
2281: if (fd < 0) {
2282: ABORT("/proc open failed");
2283: }
2284: GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
2285: close(fd);
2286: if (GC_proc_fd < 0) {
2287: ABORT("/proc ioctl failed");
2288: }
2289: GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
2290: # ifdef SOLARIS_THREADS
2291: GC_fresh_pages = (struct hblk **)
2292: GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
2293: if (GC_fresh_pages == 0) {
2294: GC_err_printf0("No space for fresh pages\n");
2295: EXIT();
2296: }
2297: BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
2298: # endif
2299: }
2300:
2301: /* Ignore write hints. They don't help us here. */
2302: /*ARGSUSED*/
2303: void GC_write_hint(h)
2304: struct hblk *h;
2305: {
2306: }
2307:
2308: #ifdef SOLARIS_THREADS
2309: # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2310: #else
2311: # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
2312: #endif
2313:
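/* The page data read from GC_proc_fd below consists of a struct        */
/* prpageheader, followed by pr_nmap mappings, each a struct prasmap    */
/* followed by one flag byte per page (tested for PG_MODIFIED) and      */
/* padded to a sizeof(long) boundary.                                   */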
2314: void GC_read_dirty()
2315: {
2316: unsigned long ps, np;
2317: int nmaps;
2318: ptr_t vaddr;
2319: struct prasmap * map;
2320: char * bufp;
2321: ptr_t current_addr, limit;
2322: int i;
2323: int dummy;
2324:
2325: BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
2326:
2327: bufp = GC_proc_buf;
2328: if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2329: # ifdef PRINTSTATS
2330: GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
2331: GC_proc_buf_size);
2332: # endif
2333: {
2334: /* Retry with larger buffer. */
2335: word new_size = 2 * GC_proc_buf_size;
2336: char * new_buf = GC_scratch_alloc(new_size);
2337:
2338: if (new_buf != 0) {
2339: GC_proc_buf = bufp = new_buf;
2340: GC_proc_buf_size = new_size;
2341: }
2342: if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2343: WARN("Insufficient space for /proc read\n", 0);
2344: /* Punt: */
2345: memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
2346: memset(GC_written_pages, 0xff, sizeof(page_hash_table));
2347: # ifdef SOLARIS_THREADS
2348: BZERO(GC_fresh_pages,
2349: MAX_FRESH_PAGES * sizeof (struct hblk *));
2350: # endif
2351: return;
2352: }
2353: }
2354: }
2355: /* Copy dirty bits into GC_grungy_pages */
2356: nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
2357: /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
2358: nmaps, PG_REFERENCED, PG_MODIFIED); */
2359: bufp = bufp + sizeof(struct prpageheader);
2360: for (i = 0; i < nmaps; i++) {
2361: map = (struct prasmap *)bufp;
2362: vaddr = (ptr_t)(map -> pr_vaddr);
2363: ps = map -> pr_pagesize;
2364: np = map -> pr_npage;
2365: /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
2366: limit = vaddr + ps * np;
2367: bufp += sizeof (struct prasmap);
2368: for (current_addr = vaddr;
2369: current_addr < limit; current_addr += ps){
2370: if ((*bufp++) & PG_MODIFIED) {
2371: register struct hblk * h = (struct hblk *) current_addr;
2372:
2373: while ((ptr_t)h < current_addr + ps) {
2374: register word index = PHT_HASH(h);
2375:
2376: set_pht_entry_from_index(GC_grungy_pages, index);
2377: # ifdef SOLARIS_THREADS
2378: {
2379: register int slot = FRESH_PAGE_SLOT(h);
2380:
2381: if (GC_fresh_pages[slot] == h) {
2382: GC_fresh_pages[slot] = 0;
2383: }
2384: }
2385: # endif
2386: h++;
2387: }
2388: }
2389: }
2390: bufp += sizeof(long) - 1;
2391: bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
2392: }
2393: /* Update GC_written_pages. */
2394: GC_or_pages(GC_written_pages, GC_grungy_pages);
2395: # ifdef SOLARIS_THREADS
2396: /* Make sure that old stacks are considered completely clean */
2397: /* unless written again. */
2398: GC_old_stacks_are_fresh();
2399: # endif
2400: }
2401:
2402: #undef READ
2403:
2404: GC_bool GC_page_was_dirty(h)
2405: struct hblk *h;
2406: {
2407: register word index = PHT_HASH(h);
2408: register GC_bool result;
2409:
2410: result = get_pht_entry_from_index(GC_grungy_pages, index);
2411: # ifdef SOLARIS_THREADS
2412: if (result && PAGE_IS_FRESH(h)) result = FALSE;
2413: /* This happens only if the page was declared fresh since */
2414: /* the read_dirty call, e.g. because it's in an unused */
2415: /* thread stack.  It's OK to treat it as clean in */
2416: /* that case, and it's consistent with */
2417: /* GC_page_was_ever_dirty. */
2418: # endif
2419: return(result);
2420: }
2421:
2422: GC_bool GC_page_was_ever_dirty(h)
2423: struct hblk *h;
2424: {
2425: register word index = PHT_HASH(h);
2426: register GC_bool result;
2427:
2428: result = get_pht_entry_from_index(GC_written_pages, index);
2429: # ifdef SOLARIS_THREADS
2430: if (result && PAGE_IS_FRESH(h)) result = FALSE;
2431: # endif
2432: return(result);
2433: }
2434:
2435: /* Caller holds allocation lock. */
2436: void GC_is_fresh(h, n)
2437: struct hblk *h;
2438: word n;
2439: {
2440:
2441: register word index;
2442:
2443: # ifdef SOLARIS_THREADS
2444: register word i;
2445:
2446: if (GC_fresh_pages != 0) {
2447: for (i = 0; i < n; i++) {
2448: ADD_FRESH_PAGE(h + i);
2449: }
2450: }
2451: # endif
2452: }
2453:
2454: # endif /* PROC_VDB */
2455:
2456:
2457: # ifdef PCR_VDB
2458:
2459: # include "vd/PCR_VD.h"
2460:
2461: # define NPAGES (32*1024) /* 128 MB */
2462:
2463: PCR_VD_DB GC_grungy_bits[NPAGES];
2464:
2465: ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
2466: /* HBLKSIZE aligned. */
2467:
2468: void GC_dirty_init()
2469: {
2470: GC_dirty_maintained = TRUE;
2471: /* For the time being, we assume the heap generally grows up */
2472: GC_vd_base = GC_heap_sects[0].hs_start;
2473: if (GC_vd_base == 0) {
2474: ABORT("Bad initial heap segment");
2475: }
2476: if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
2477: != PCR_ERes_okay) {
2478: ABORT("dirty bit initialization failed");
2479: }
2480: }
2481:
2482: void GC_read_dirty()
2483: {
2484: /* lazily enable dirty bits on newly added heap sects */
2485: {
2486: static int onhs = 0;
2487: int nhs = GC_n_heap_sects;
2488: for( ; onhs < nhs; onhs++ ) {
2489: PCR_VD_WriteProtectEnable(
2490: GC_heap_sects[onhs].hs_start,
2491: GC_heap_sects[onhs].hs_bytes );
2492: }
2493: }
2494:
2495:
2496: if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
2497: != PCR_ERes_okay) {
2498: ABORT("dirty bit read failed");
2499: }
2500: }
2501:
2502: GC_bool GC_page_was_dirty(h)
2503: struct hblk *h;
2504: {
2505: if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
2506: return(TRUE);
2507: }
2508: return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
2509: }
2510:
2511: /*ARGSUSED*/
2512: void GC_write_hint(h)
2513: struct hblk *h;
2514: {
2515: PCR_VD_WriteProtectDisable(h, HBLKSIZE);
2516: PCR_VD_WriteProtectEnable(h, HBLKSIZE);
2517: }
2518:
2519: # endif /* PCR_VDB */
2520:
2521: /*
2522: * Call stack save code for debugging.
2523: * Should probably be in mach_dep.c, but that requires reorganization.
2524: */
2525: #if defined(SPARC) && !defined(LINUX)
2526: # if defined(SUNOS4)
2527: # include <machine/frame.h>
2528: # else
2529: # if defined (DRSNX)
2530: # include <sys/sparc/frame.h>
2531: # else
2532: # if defined(OPENBSD)
2533: # include <frame.h>
2534: # else
2535: # include <sys/frame.h>
2536: # endif
2537: # endif
2538: # endif
2539: # if NARGS > 6
2540: --> We only know how to get the first 6 arguments
2541: # endif
2542:
2543: #ifdef SAVE_CALL_CHAIN
2544: /* Fill in the pc and argument information for up to NFRAMES of my */
2545: /* callers. Ignore my frame and my caller's frame. */
2546:
2547: #ifdef OPENBSD
2548: # define FR_SAVFP fr_fp
2549: # define FR_SAVPC fr_pc
2550: #else
2551: # define FR_SAVFP fr_savfp
2552: # define FR_SAVPC fr_savpc
2553: #endif
2554:
2555: void GC_save_callers (info)
2556: struct callinfo info[NFRAMES];
2557: {
2558: struct frame *frame;
2559: struct frame *fp;
2560: int nframes = 0;
2561: word GC_save_regs_in_stack();
2562:
2563: frame = (struct frame *) GC_save_regs_in_stack ();
2564:
2565: for (fp = frame -> FR_SAVFP; fp != 0 && nframes < NFRAMES;
2566: fp = fp -> FR_SAVFP, nframes++) {
2567: register int i;
2568:
2569: info[nframes].ci_pc = fp->FR_SAVPC;
2570: for (i = 0; i < NARGS; i++) {
2571: info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
2572: }
2573: }
2574: if (nframes < NFRAMES) info[nframes].ci_pc = 0;
2575: }
2576:
2577: #endif /* SAVE_CALL_CHAIN */
2578: #endif /* SPARC */
2579:
2580:
2581: