Annotation of OpenXM_contrib2/asir2000/gc/os_dep.c, Revision 1.8
1.1 noro 1: /*
1.2 noro 2: * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
1.1 noro 3: * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
1.2 noro 4: * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5: * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
1.1 noro 6: *
7: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9: *
10: * Permission is hereby granted to use or copy this program
11: * for any purpose, provided the above notices are retained on all copies.
12: * Permission to modify the code and to distribute modified code is granted,
13: * provided the above notices are retained, and a notice that the code was
14: * modified is included with the above copyright notice.
15: */
16:
1.4 noro 17: # include "private/gc_priv.h"
1.1 noro 18:
19: # if defined(LINUX) && !defined(POWERPC)
20: # include <linux/version.h>
21: # if (LINUX_VERSION_CODE <= 0x10400)
22: /* Ugly hack to get struct sigcontext_struct definition. Required */
23: /* for some early 1.3.X releases. Will hopefully go away soon. */
24: /* In some later Linux releases, asm/sigcontext.h may have to */
25: /* be included instead. */
26: # define __KERNEL__
27: # include <asm/signal.h>
28: # undef __KERNEL__
29: # else
30: /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31: /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32: /* prototypes, so we have to include the top-level sigcontext.h to */
33: /* make sure the former gets defined to be the latter if appropriate. */
34: # include <features.h>
35: # if 2 <= __GLIBC__
1.2 noro 36: # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
1.1 noro 37: /* glibc 2.1 no longer has sigcontext.h, but its signal.h */
38: /* has the right declaration; include sigcontext.h only for 2.0. */
39: # include <sigcontext.h>
40: # endif /* 0 == __GLIBC_MINOR__ */
41: # else /* not 2 <= __GLIBC__ */
42: /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43: /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44: # include <asm/sigcontext.h>
45: # endif /* 2 <= __GLIBC__ */
46: # endif
47: # endif
1.4 noro 48: # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
49: && !defined(MSWINCE)
1.1 noro 50: # include <sys/types.h>
51: # if !defined(MSWIN32) && !defined(SUNOS4)
52: # include <unistd.h>
53: # endif
54: # endif
55:
56: # include <stdio.h>
1.4 noro 57: # if defined(MSWINCE)
58: # define SIGSEGV 0 /* value is irrelevant */
59: # else
60: # include <signal.h>
61: # endif
1.1 noro 62:
63: /* Blatantly OS dependent routines, except for those that are related */
1.2 noro 64: /* to dynamic loading. */
1.1 noro 65:
1.6 noro 66: # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
1.1 noro 67: # define NEED_FIND_LIMIT
68: # endif
69:
1.6 noro 70: # if !defined(STACKBOTTOM) && defined(HEURISTIC2)
1.1 noro 71: # define NEED_FIND_LIMIT
72: # endif
73:
1.3 noro 74: # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
1.1 noro 75: # define NEED_FIND_LIMIT
76: # endif
77:
1.6 noro 78: # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
79: || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1.1 noro 80: # define NEED_FIND_LIMIT
81: # endif
82:
1.7 noro 83: #if defined(FREEBSD) && defined(I386)
84: # include <machine/trap.h>
85: # if !defined(PCR)
86: # define NEED_FIND_LIMIT
87: # endif
88: #endif
89:
1.1 noro 90: #ifdef NEED_FIND_LIMIT
91: # include <setjmp.h>
92: #endif
93:
94: #ifdef AMIGA
1.4 noro 95: # define GC_AMIGA_DEF
96: # include "AmigaOS.c"
97: # undef GC_AMIGA_DEF
1.1 noro 98: #endif
99:
1.4 noro 100: #if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 101: # define WIN32_LEAN_AND_MEAN
102: # define NOSERVICE
103: # include <windows.h>
104: #endif
105:
106: #ifdef MACOS
107: # include <Processes.h>
108: #endif
109:
110: #ifdef IRIX5
111: # include <sys/uio.h>
112: # include <malloc.h> /* for locking */
113: #endif
114: #ifdef USE_MMAP
115: # include <sys/types.h>
116: # include <sys/mman.h>
117: # include <sys/stat.h>
1.4 noro 118: #endif
119:
120: #ifdef UNIX_LIKE
1.1 noro 121: # include <fcntl.h>
122: #endif
123:
1.6 noro 124: #if defined(SUNOS5SIGS) || defined (HURD) || defined(LINUX)
125: # ifdef SUNOS5SIGS
126: # include <sys/siginfo.h>
127: # endif
1.1 noro 128: # undef setjmp
129: # undef longjmp
130: # define setjmp(env) sigsetjmp(env, 1)
131: # define longjmp(env, val) siglongjmp(env, val)
132: # define jmp_buf sigjmp_buf
133: #endif
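/* Illustrative sketch (not collector code) of why the saved signal */
/* mask matters: GC_find_limit below longjmps out of a SIGSEGV */
/* handler, and only the sigsetjmp(env, 1)/siglongjmp pair restores */
/* the mask, so SIGSEGV is not left blocked after the jump. The */
/* example_* names are hypothetical. */
#if 0
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf example_env;

static void example_handler(int sig)
{
    siglongjmp(example_env, 1);
}

/* Return 1 if *p is readable, 0 if touching it faults. */
static int example_probe(volatile char *p)
{
    signal(SIGSEGV, example_handler);
    if (sigsetjmp(example_env, 1) == 0) { /* nonzero 2nd arg: save mask */
        (void)*p;
        return 1;
    }
    return 0; /* reached via siglongjmp; mask already restored */
}
#endif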
134:
1.7 noro 135: #ifdef DARWIN
136: /* for get_etext and friends */
137: #include <mach-o/getsect.h>
138: #endif
139:
1.1 noro 140: #ifdef DJGPP
1.4 noro 141: /* Apparently necessary for djgpp 2.01. May cause problems with */
1.1 noro 142: /* other versions. */
143: typedef long unsigned int caddr_t;
144: #endif
145:
146: #ifdef PCR
147: # include "il/PCR_IL.h"
148: # include "th/PCR_ThCtl.h"
149: # include "mm/PCR_MM.h"
150: #endif
151:
152: #if !defined(NO_EXECUTE_PERMISSION)
153: # define OPT_PROT_EXEC PROT_EXEC
154: #else
155: # define OPT_PROT_EXEC 0
156: #endif
157:
1.7 noro 158: #if defined(LINUX) && \
159: (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64) || !defined(SMALL_CONFIG))
160:
161: /* We need to parse /proc/self/maps, either to find dynamic libraries */
162: /* or to find the register backing store base (IA64). Do it once */
163: /* here. */
164:
165: #define READ read
166:
167: /* Repeatedly perform a read call until the buffer is filled or */
168: /* we encounter EOF. */
169: ssize_t GC_repeat_read(int fd, char *buf, size_t count)
170: {
171: ssize_t num_read = 0;
172: ssize_t result;
173:
174: while (num_read < count) {
175: result = READ(fd, buf + num_read, count - num_read);
176: if (result < 0) return result;
177: if (result == 0) break;
178: num_read += result;
179: }
180: return num_read;
181: }
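/* Minimal usage sketch for GC_repeat_read (illustrative only; the */
/* file name and buffer size here are assumptions, not values the */
/* collector uses). */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void example_slurp_proc_file(void)
{
    char buf[4096];
    int fd = open("/proc/self/stat", O_RDONLY);
    ssize_t n;

    if (fd < 0) return;
    n = GC_repeat_read(fd, buf, sizeof(buf) - 1);
    if (n > 0) buf[n] = '\0'; /* NUL-terminate for string parsing */
    close(fd);
}
#endif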
182:
183: /*
184: * Apply fn to a buffer containing the contents of /proc/self/maps.
185: * Return the result of fn or, if we failed, 0.
186: */
187:
188: word GC_apply_to_maps(word (*fn)(char *))
189: {
190: int f;
191: int result;
192: int maps_size;
193: char maps_temp[32768];
194: char *maps_buf;
195:
196: /* Read /proc/self/maps */
197: /* Note that we may not allocate, and thus can't use stdio. */
198: f = open("/proc/self/maps", O_RDONLY);
199: if (-1 == f) return 0;
200: /* stat() doesn't work for /proc/self/maps, so we have to
201: read it to find out how large it is... */
202: maps_size = 0;
203: do {
204: result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
205: if (result < 0) { close(f); return 0; } /* EOF just ends the loop */
206: maps_size += result;
207: } while (result == sizeof(maps_temp));
208:
209: if (maps_size >= sizeof(maps_temp)) {
210: /* Too large for our buffer (with its trailing NUL): re-read. */
211: close(f);
212: f = open("/proc/self/maps", O_RDONLY);
213: if (-1 == f) return 0;
214: maps_buf = alloca(maps_size + 1); /* + 1 for the trailing NUL */
215: if (NULL == maps_buf) { close(f); return 0; }
216: result = GC_repeat_read(f, maps_buf, maps_size);
217: if (result <= 0) { close(f); return 0; }
218: } else {
219: /* Otherwise use the fixed size buffer */
220: maps_buf = maps_temp;
221: }
222:
223: close(f);
224: maps_buf[result] = '\0';
225:
226: /* Apply fn to result. */
227: return fn(maps_buf);
228: }
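/* Sketch of a GC_apply_to_maps client (illustrative; the callback */
/* name is hypothetical): count the lines of the maps buffer the */
/* callback is handed. The IA64 code further below uses the same */
/* pattern to locate the register backing store. */
#if 0
static word example_count_map_lines(char *maps)
{
    word n = 0;
    char *p;

    for (p = maps; *p != '\0'; p++) {
        if (*p == '\n') n++;
    }
    return n;
}

/* Usage: word lines = GC_apply_to_maps(example_count_map_lines); */
#endif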
229:
230: #endif /* Need GC_apply_to_maps */
231:
232: #if defined(LINUX) && (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64))
233: //
234: // GC_parse_map_entry parses an entry from /proc/self/maps so we can
235: // locate all writable data segments that belong to shared libraries.
236: // The format of one of these entries and the fields we care about
237: // is as follows:
238: //  XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537  name of mapping...\n
239: //  ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
240: //  start    end      prot          maj_dev
241: //  0        9        18            32
242: //
243: //  For 64 bit ABIs:
244: //  0        17       34            56
245: //
246: // The parser is called with a pointer to the entry, and the return
247: // value is either NULL or a pointer to the next entry (the byte
248: // after the trailing '\n').
249: //
250: #if CPP_WORDSZ == 32
251: # define OFFSET_MAP_START 0
252: # define OFFSET_MAP_END 9
253: # define OFFSET_MAP_PROT 18
254: # define OFFSET_MAP_MAJDEV 32
255: # define ADDR_WIDTH 8
256: #endif
257:
258: #if CPP_WORDSZ == 64
259: # define OFFSET_MAP_START 0
260: # define OFFSET_MAP_END 17
261: # define OFFSET_MAP_PROT 34
262: # define OFFSET_MAP_MAJDEV 56
263: # define ADDR_WIDTH 16
264: #endif
265:
266: /*
267: * Assign various fields of the first line in buf_ptr to *start, *end,
268: * *prot_buf and *maj_dev. Only *prot_buf may be set for unwritable maps.
269: */
270: char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
271: char *prot_buf, unsigned int *maj_dev)
272: {
273: int i;
274: char *tok;
275:
276: if (buf_ptr == NULL || *buf_ptr == '\0') {
277: return NULL;
278: }
279:
280: memcpy(prot_buf, buf_ptr+OFFSET_MAP_PROT, 4);
281: /* do the protections first. */
282: prot_buf[4] = '\0';
283:
284: if (prot_buf[1] == 'w') {/* we can skip all of this if it's not writable. */
285:
286: tok = buf_ptr;
287: buf_ptr[OFFSET_MAP_START+ADDR_WIDTH] = '\0';
288: *start = strtoul(tok, NULL, 16);
289:
290: tok = buf_ptr+OFFSET_MAP_END;
291: buf_ptr[OFFSET_MAP_END+ADDR_WIDTH] = '\0';
292: *end = strtoul(tok, NULL, 16);
293:
294: buf_ptr += OFFSET_MAP_MAJDEV;
295: tok = buf_ptr;
296: while (*buf_ptr != ':') buf_ptr++;
297: *buf_ptr++ = '\0';
298: *maj_dev = strtoul(tok, NULL, 16);
299: }
300:
301: while (*buf_ptr && *buf_ptr++ != '\n');
302:
303: return buf_ptr;
304: }
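/* Illustrative loop over all entries with GC_parse_map_entry (a */
/* sketch assuming a NUL-terminated buffer as produced by */
/* GC_apply_to_maps; the function name is hypothetical): */
#if 0
static word example_sum_writable(char *maps)
{
    char *buf_ptr = maps;
    char prot_buf[5];
    word start, end, total = 0;
    unsigned int maj_dev;

    for (;;) {
        buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end,
                                     prot_buf, &maj_dev);
        if (buf_ptr == NULL) return total;
        /* start/end are only filled in for writable mappings. */
        if (prot_buf[1] == 'w') total += end - start;
    }
}
#endif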
305:
306: #endif /* Need to parse /proc/self/maps. */
307:
1.3 noro 308: #if defined(SEARCH_FOR_DATA_START)
1.1 noro 309: /* The I386 case can be handled without a search. The Alpha case */
310: /* used to be handled differently as well, but the rules changed */
311: /* for recent Linux versions. This seems to be the easiest way to */
312: /* cover all versions. */
1.4 noro 313:
314: # ifdef LINUX
1.7 noro 315: /* Some Linux distributions arrange to define __data_start. Some */
316: /* define data_start as a weak symbol. The latter is technically */
317: /* broken, since the user program may define data_start, in which */
318: /* case we lose. Nonetheless, we try both, preferring __data_start. */
319: /* We assume gcc-compatible pragmas. */
1.4 noro 320: # pragma weak __data_start
1.6 noro 321: extern int __data_start[];
1.4 noro 322: # pragma weak data_start
1.6 noro 323: extern int data_start[];
1.4 noro 324: # endif /* LINUX */
1.6 noro 325: extern int _end[];
1.4 noro 326:
1.1 noro 327: ptr_t GC_data_start;
328:
329: void GC_init_linux_data_start()
330: {
331: extern ptr_t GC_find_limit();
332:
1.4 noro 333: # ifdef LINUX
334: /* Try the easy approaches first: */
1.6 noro 335: if ((ptr_t)__data_start != 0) {
336: GC_data_start = (ptr_t)(__data_start);
1.4 noro 337: return;
338: }
1.6 noro 339: if ((ptr_t)data_start != 0) {
340: GC_data_start = (ptr_t)(data_start);
1.4 noro 341: return;
342: }
343: # endif /* LINUX */
1.6 noro 344: GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
1.4 noro 345: }
346: #endif
347:
1.6 noro 348: # ifdef ECOS
349:
350: # ifndef ECOS_GC_MEMORY_SIZE
351: # define ECOS_GC_MEMORY_SIZE (448 * 1024)
352: # endif /* ECOS_GC_MEMORY_SIZE */
353:
354: // setjmp() function, as described in ANSI para 7.6.1.1
355: #define setjmp( __env__ ) hal_setjmp( __env__ )
356:
357: // FIXME: This is a simple way of allocating memory which is
358: // compatible with ECOS early releases. Later releases use a more
359: // sophisticated means of allocating memory than this simple static
360: // allocator, but this method is at least bound to work.
361: static char memory[ECOS_GC_MEMORY_SIZE];
362: static char *brk = memory;
363:
364: static void *tiny_sbrk(ptrdiff_t increment)
365: {
366: void *p = brk;
367:
368: brk += increment;
369:
370: if (brk > memory + sizeof memory)
371: {
372: brk -= increment;
373: return NULL;
374: }
375:
376: return p;
377: }
378: #define sbrk tiny_sbrk
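/* Illustrative behavior of the bump allocator above (a sketch, not */
/* collector code): consecutive calls return adjacent chunks of the */
/* static arena, and NULL once it is exhausted. */
#if 0
static void example_tiny_sbrk(void)
{
    char *a = (char *)sbrk(1024); /* first KB of the arena */
    char *b = (char *)sbrk(1024); /* b == a + 1024 */
    void *c = sbrk(ECOS_GC_MEMORY_SIZE); /* overruns the arena: */
                                         /* returns NULL, break is */
                                         /* rolled back */
}
#endif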
379: # endif /* ECOS */
380:
381: #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
1.4 noro 382: ptr_t GC_data_start;
383:
384: void GC_init_netbsd_elf()
385: {
386: extern ptr_t GC_find_limit();
387: extern char **environ;
388: /* Some versions may require __environ, with the leading */
389: /* underscores, instead. */
390: GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
1.1 noro 391: }
392: #endif
393:
394: # ifdef OS2
395:
396: # include <stddef.h>
397:
398: # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
399:
400: struct exe_hdr {
401: unsigned short magic_number;
402: unsigned short padding[29];
403: long new_exe_offset;
404: };
405:
406: #define E_MAGIC(x) (x).magic_number
407: #define EMAGIC 0x5A4D
408: #define E_LFANEW(x) (x).new_exe_offset
409:
410: struct e32_exe {
411: unsigned char magic_number[2];
412: unsigned char byte_order;
413: unsigned char word_order;
414: unsigned long exe_format_level;
415: unsigned short cpu;
416: unsigned short os;
417: unsigned long padding1[13];
418: unsigned long object_table_offset;
419: unsigned long object_count;
420: unsigned long padding2[31];
421: };
422:
423: #define E32_MAGIC1(x) (x).magic_number[0]
424: #define E32MAGIC1 'L'
425: #define E32_MAGIC2(x) (x).magic_number[1]
426: #define E32MAGIC2 'X'
427: #define E32_BORDER(x) (x).byte_order
428: #define E32LEBO 0
429: #define E32_WORDER(x) (x).word_order
430: #define E32LEWO 0
431: #define E32_CPU(x) (x).cpu
432: #define E32CPU286 1
433: #define E32_OBJTAB(x) (x).object_table_offset
434: #define E32_OBJCNT(x) (x).object_count
435:
436: struct o32_obj {
437: unsigned long size;
438: unsigned long base;
439: unsigned long flags;
440: unsigned long pagemap;
441: unsigned long mapsize;
442: unsigned long reserved;
443: };
444:
445: #define O32_FLAGS(x) (x).flags
446: #define OBJREAD 0x0001L
447: #define OBJWRITE 0x0002L
448: #define OBJINVALID 0x0080L
449: #define O32_SIZE(x) (x).size
450: #define O32_BASE(x) (x).base
451:
452: # else /* IBM's compiler */
453:
454: /* A kludge to get around what appears to be a header file bug */
455: # ifndef WORD
456: # define WORD unsigned short
457: # endif
458: # ifndef DWORD
459: # define DWORD unsigned long
460: # endif
461:
462: # define EXE386 1
463: # include <newexe.h>
464: # include <exe386.h>
465:
466: # endif /* __IBMC__ */
467:
468: # define INCL_DOSEXCEPTIONS
469: # define INCL_DOSPROCESS
470: # define INCL_DOSERRORS
471: # define INCL_DOSMODULEMGR
472: # define INCL_DOSMEMMGR
473: # include <os2.h>
474:
475:
476: /* Disable and enable signals during nontrivial allocations */
477:
478: void GC_disable_signals(void)
479: {
480: ULONG nest;
481:
482: DosEnterMustComplete(&nest);
483: if (nest != 1) ABORT("nested GC_disable_signals");
484: }
485:
486: void GC_enable_signals(void)
487: {
488: ULONG nest;
489:
490: DosExitMustComplete(&nest);
491: if (nest != 0) ABORT("GC_enable_signals");
492: }
493:
494:
495: # else
496:
497: # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
1.4 noro 498: && !defined(MSWINCE) \
1.6 noro 499: && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
500: && !defined(NOSYS) && !defined(ECOS)
1.1 noro 501:
1.6 noro 502: # if defined(sigmask) && !defined(UTS4) && !defined(HURD)
1.1 noro 503: /* Use the traditional BSD interface */
504: # define SIGSET_T int
505: # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
506: # define SIG_FILL(set) (set) = 0x7fffffff
507: /* Setting the leading bit appears to provoke a bug in some */
508: /* longjmp implementations. Most systems appear not to have */
509: /* a signal 32. */
510: # define SIGSETMASK(old, new) (old) = sigsetmask(new)
511: # else
512: /* Use POSIX/SYSV interface */
513: # define SIGSET_T sigset_t
514: # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
515: # define SIG_FILL(set) sigfillset(&set)
516: # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
517: # endif
518:
519: static GC_bool mask_initialized = FALSE;
520:
521: static SIGSET_T new_mask;
522:
523: static SIGSET_T old_mask;
524:
525: static SIGSET_T dummy;
526:
527: #if defined(PRINTSTATS) && !defined(THREADS)
528: # define CHECK_SIGNALS
529: int GC_sig_disabled = 0;
530: #endif
531:
532: void GC_disable_signals()
533: {
534: if (!mask_initialized) {
535: SIG_FILL(new_mask);
536:
537: SIG_DEL(new_mask, SIGSEGV);
538: SIG_DEL(new_mask, SIGILL);
539: SIG_DEL(new_mask, SIGQUIT);
540: # ifdef SIGBUS
541: SIG_DEL(new_mask, SIGBUS);
542: # endif
543: # ifdef SIGIOT
544: SIG_DEL(new_mask, SIGIOT);
545: # endif
546: # ifdef SIGEMT
547: SIG_DEL(new_mask, SIGEMT);
548: # endif
549: # ifdef SIGTRAP
550: SIG_DEL(new_mask, SIGTRAP);
551: # endif
552: mask_initialized = TRUE;
553: }
554: # ifdef CHECK_SIGNALS
555: if (GC_sig_disabled != 0) ABORT("Nested disables");
556: GC_sig_disabled++;
557: # endif
558: SIGSETMASK(old_mask,new_mask);
559: }
560:
561: void GC_enable_signals()
562: {
563: # ifdef CHECK_SIGNALS
564: if (GC_sig_disabled != 1) ABORT("Unmatched enable");
565: GC_sig_disabled--;
566: # endif
567: SIGSETMASK(dummy,old_mask);
568: }
569:
570: # endif /* !PCR */
571:
572: # endif /*!OS/2 */
573:
574: /* Ivan Demakov: simplest way (to me) */
1.6 noro 575: #if defined (DOS4GW)
1.1 noro 576: void GC_disable_signals() { }
577: void GC_enable_signals() { }
578: #endif
579:
580: /* Find the page size */
581: word GC_page_size;
582:
1.4 noro 583: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 584: void GC_setpagesize()
585: {
1.4 noro 586: GetSystemInfo(&GC_sysinfo);
587: GC_page_size = GC_sysinfo.dwPageSize;
1.1 noro 588: }
589:
590: # else
591: # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
592: || defined(USE_MUNMAP)
593: void GC_setpagesize()
594: {
595: GC_page_size = GETPAGESIZE();
596: }
597: # else
598: /* It's acceptable to fake it. */
599: void GC_setpagesize()
600: {
601: GC_page_size = HBLKSIZE;
602: }
603: # endif
604: # endif
605:
606: /*
607: * Find the base of the stack.
608: * Used only in single-threaded environment.
609: * With threads, GC_mark_roots needs to know how to do this.
610: * Called with allocator lock held.
611: */
1.4 noro 612: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 613: # define is_writable(prot) ((prot) == PAGE_READWRITE \
614: || (prot) == PAGE_WRITECOPY \
615: || (prot) == PAGE_EXECUTE_READWRITE \
616: || (prot) == PAGE_EXECUTE_WRITECOPY)
617: /* Return the number of bytes that are writable starting at p. */
618: /* The pointer p is assumed to be page aligned. */
619: /* If base is not 0, *base becomes the beginning of the */
620: /* allocation region containing p. */
621: word GC_get_writable_length(ptr_t p, ptr_t *base)
622: {
623: MEMORY_BASIC_INFORMATION buf;
624: word result;
625: word protect;
626:
627: result = VirtualQuery(p, &buf, sizeof(buf));
628: if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
629: if (base != 0) *base = (ptr_t)(buf.AllocationBase);
630: protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
631: if (!is_writable(protect)) {
632: return(0);
633: }
634: if (buf.State != MEM_COMMIT) return(0);
635: return(buf.RegionSize);
636: }
637:
638: ptr_t GC_get_stack_base()
639: {
640: int dummy;
641: ptr_t sp = (ptr_t)(&dummy);
642: ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
643: word size = GC_get_writable_length(trunc_sp, 0);
644:
645: return(trunc_sp + size);
646: }
647:
648:
1.4 noro 649: # endif /* MS Windows */
650:
651: # ifdef BEOS
652: # include <kernel/OS.h>
653: ptr_t GC_get_stack_base(){
654: thread_info th;
655: get_thread_info(find_thread(NULL),&th);
656: return th.stack_end;
657: }
658: # endif /* BEOS */
659:
1.1 noro 660:
661: # ifdef OS2
662:
663: ptr_t GC_get_stack_base()
664: {
665: PTIB ptib;
666: PPIB ppib;
667:
668: if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
669: GC_err_printf0("DosGetInfoBlocks failed\n");
670: ABORT("DosGetInfoBlocks failed\n");
671: }
672: return((ptr_t)(ptib -> tib_pstacklimit));
673: }
674:
1.4 noro 675: # endif /* OS2 */
1.1 noro 676:
677: # ifdef AMIGA
1.4 noro 678: # define GC_AMIGA_SB
679: # include "AmigaOS.c"
680: # undef GC_AMIGA_SB
681: # endif /* AMIGA */
1.1 noro 682:
1.4 noro 683: # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
1.1 noro 684:
685: # ifdef __STDC__
686: typedef void (*handler)(int);
687: # else
688: typedef void (*handler)();
689: # endif
690:
1.6 noro 691: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) || defined(HURD)
1.1 noro 692: static struct sigaction old_segv_act;
1.6 noro 693: # if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) || defined(HURD)
1.1 noro 694: static struct sigaction old_bus_act;
695: # endif
696: # else
697: static handler old_segv_handler, old_bus_handler;
698: # endif
699:
1.4 noro 700: # ifdef __STDC__
701: void GC_set_and_save_fault_handler(handler h)
702: # else
703: void GC_set_and_save_fault_handler(h)
704: handler h;
705: # endif
1.1 noro 706: {
1.6 noro 707: # if defined(SUNOS5SIGS) || defined(IRIX5) \
708: || defined(OSF1) || defined(HURD)
1.1 noro 709: struct sigaction act;
710:
1.4 noro 711: act.sa_handler = h;
1.6 noro 712: # ifdef SUNOS5SIGS
713: act.sa_flags = SA_RESTART | SA_NODEFER;
714: # else
715: act.sa_flags = SA_RESTART;
716: # endif
1.1 noro 717: /* The presence of SA_NODEFER represents yet another gross */
718: /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
719: /* interact correctly with -lthread. We hide the confusion */
720: /* by making sure that signal handling doesn't affect the */
721: /* signal mask. */
722:
723: (void) sigemptyset(&act.sa_mask);
1.6 noro 724: # ifdef GC_IRIX_THREADS
1.1 noro 725: /* Older versions have a bug related to retrieving and */
726: /* setting a handler at the same time. */
727: (void) sigaction(SIGSEGV, 0, &old_segv_act);
728: (void) sigaction(SIGSEGV, &act, 0);
729: # else
730: (void) sigaction(SIGSEGV, &act, &old_segv_act);
1.2 noro 731: # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
1.6 noro 732: || defined(HPUX) || defined(HURD)
1.2 noro 733: /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
734: /* Pthreads doesn't exist under Irix 5.x, so we */
735: /* don't have to worry in the threads case. */
1.1 noro 736: (void) sigaction(SIGBUS, &act, &old_bus_act);
737: # endif
1.6 noro 738: # endif /* GC_IRIX_THREADS */
1.1 noro 739: # else
1.4 noro 740: old_segv_handler = signal(SIGSEGV, h);
1.1 noro 741: # ifdef SIGBUS
1.4 noro 742: old_bus_handler = signal(SIGBUS, h);
1.1 noro 743: # endif
744: # endif
745: }
1.4 noro 746: # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
747:
748: # ifdef NEED_FIND_LIMIT
749: /* Some tools to implement HEURISTIC2 */
750: # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
751: /* static */ jmp_buf GC_jmp_buf;
752:
753: /*ARGSUSED*/
754: void GC_fault_handler(sig)
755: int sig;
756: {
757: longjmp(GC_jmp_buf, 1);
758: }
759:
760: void GC_setup_temporary_fault_handler()
761: {
762: GC_set_and_save_fault_handler(GC_fault_handler);
763: }
1.1 noro 764:
765: void GC_reset_fault_handler()
766: {
1.6 noro 767: # if defined(SUNOS5SIGS) || defined(IRIX5) \
768: || defined(OSF1) || defined(HURD)
1.1 noro 769: (void) sigaction(SIGSEGV, &old_segv_act, 0);
1.2 noro 770: # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
1.6 noro 771: || defined(HPUX) || defined(HURD)
1.1 noro 772: (void) sigaction(SIGBUS, &old_bus_act, 0);
773: # endif
774: # else
775: (void) signal(SIGSEGV, old_segv_handler);
776: # ifdef SIGBUS
777: (void) signal(SIGBUS, old_bus_handler);
778: # endif
779: # endif
780: }
781:
782: /* Return the first non-addressable location > p (up) or */
1.7 noro 783: /* the smallest location q s.t. [q,p) is addressable (!up). */
784: /* We assume that p (up) or p-1 (!up) is addressable. */
1.1 noro 785: ptr_t GC_find_limit(p, up)
786: ptr_t p;
787: GC_bool up;
788: {
789: static VOLATILE ptr_t result;
790: /* Needs to be static, since otherwise it may not be */
791: /* preserved across the longjmp. Can safely be */
792: /* static since it's only called once, with the */
793: /* allocation lock held. */
794:
795:
796: GC_setup_temporary_fault_handler();
797: if (setjmp(GC_jmp_buf) == 0) {
798: result = (ptr_t)(((word)(p))
799: & ~(MIN_PAGE_SIZE-1));
800: for (;;) {
801: if (up) {
802: result += MIN_PAGE_SIZE;
803: } else {
804: result -= MIN_PAGE_SIZE;
805: }
806: GC_noop1((word)(*result));
807: }
808: }
809: GC_reset_fault_handler();
810: if (!up) {
811: result += MIN_PAGE_SIZE;
812: }
813: return(result);
814: }
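/* Usage sketch for GC_find_limit (illustrative; example_data_limit */
/* is hypothetical): probing upward from the _end linker symbol */
/* finds the first unmapped page above the data segment, just as */
/* the SEARCH_FOR_DATA_START code above probes downward from _end. */
/* Must run with the allocation lock held. */
#if 0
extern int _end[];

static ptr_t example_data_limit(void)
{
    return GC_find_limit((ptr_t)_end, TRUE /* search upward */);
}
#endif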
815: # endif
816:
1.6 noro 817: #if defined(ECOS) || defined(NOSYS)
818: ptr_t GC_get_stack_base()
819: {
820: return STACKBOTTOM;
821: }
822: #endif
823:
1.2 noro 824: #ifdef LINUX_STACKBOTTOM
825:
1.3 noro 826: #include <sys/types.h>
827: #include <sys/stat.h>
1.7 noro 828: #include <ctype.h>
1.3 noro 829:
1.2 noro 830: # define STAT_SKIP 27 /* Number of fields preceding startstack */
1.3 noro 831: /* field in /proc/self/stat */
1.2 noro 832:
1.4 noro 833: # pragma weak __libc_stack_end
834: extern ptr_t __libc_stack_end;
835:
836: # ifdef IA64
1.7 noro 837: /* Try to read the backing store base from /proc/self/maps. */
838: /* We look for the writable mapping with a 0 major device, */
839: /* which is as close to our frame as possible, but below it.*/
840: static word backing_store_base_from_maps(char *maps)
841: {
842: char prot_buf[5];
843: char *buf_ptr = maps;
844: word start, end;
845: unsigned int maj_dev;
846: word current_best = 0;
847: word dummy;
848:
849: for (;;) {
850: buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
851: if (buf_ptr == NULL) return current_best;
852: if (prot_buf[1] == 'w' && maj_dev == 0) {
853: if (end < (word)(&dummy) && start > current_best) current_best = start;
854: }
855: }
856: return current_best;
857: }
858:
859: static word backing_store_base_from_proc(void)
860: {
861: return GC_apply_to_maps(backing_store_base_from_maps);
862: }
863:
1.4 noro 864: # pragma weak __libc_ia64_register_backing_store_base
865: extern ptr_t __libc_ia64_register_backing_store_base;
866:
867: ptr_t GC_get_register_stack_base(void)
868: {
1.6 noro 869: if (0 != &__libc_ia64_register_backing_store_base
870: && 0 != __libc_ia64_register_backing_store_base) {
871: /* Glibc 2.2.4 has a bug such that for dynamically linked */
872: /* executables __libc_ia64_register_backing_store_base is */
1.7 noro 873: /* defined but uninitialized during constructor calls. */
1.6 noro 874: /* Hence we check for both nonzero address and value. */
1.4 noro 875: return __libc_ia64_register_backing_store_base;
876: } else {
1.7 noro 877: word result = backing_store_base_from_proc();
878: if (0 == result) {
879: /* Use dumb heuristics. Works only for default configuration. */
880: result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
881: result += BACKING_STORE_ALIGNMENT - 1;
882: result &= ~(BACKING_STORE_ALIGNMENT - 1);
883: /* Verify that it's at least readable. If not, we goofed. */
884: GC_noop1(*(word *)result);
885: }
1.4 noro 886: return (ptr_t)result;
887: }
888: }
889: # endif
890:
1.2 noro 891: ptr_t GC_linux_stack_base(void)
892: {
1.3 noro 893: /* We read the stack base value from /proc/self/stat. We do this */
894: /* using direct I/O system calls in order to avoid calling malloc */
895: /* in case REDIRECT_MALLOC is defined. */
896: # define STAT_BUF_SIZE 4096
1.7 noro 897: # define STAT_READ read
898: /* Should probably call the real read, if read is wrapped. */
1.3 noro 899: char stat_buf[STAT_BUF_SIZE];
900: int f;
1.2 noro 901: char c;
902: word result = 0;
1.3 noro 903: size_t i, buf_offset = 0;
1.2 noro 904:
1.4 noro 905: /* First try the easy way. This should work for glibc 2.2 */
906: if (0 != &__libc_stack_end) {
1.6 noro 907: # ifdef IA64
908: /* Some versions of glibc set the address 16 bytes too */
909: /* low while the initialization code is running. */
910: if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
911: return __libc_stack_end + 0x10;
912: } /* Otherwise it's not safe to add 16 bytes and we fall */
913: /* back to using /proc. */
914: # else
915: return __libc_stack_end;
916: # endif
1.4 noro 917: }
1.3 noro 918: f = open("/proc/self/stat", O_RDONLY);
919: if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
920: ABORT("Couldn't read /proc/self/stat");
921: }
922: c = stat_buf[buf_offset++];
1.2 noro 923: /* Skip the required number of fields. This number is hopefully */
924: /* constant across all Linux implementations. */
925: for (i = 0; i < STAT_SKIP; ++i) {
1.3 noro 926: while (isspace(c)) c = stat_buf[buf_offset++];
927: while (!isspace(c)) c = stat_buf[buf_offset++];
1.2 noro 928: }
1.3 noro 929: while (isspace(c)) c = stat_buf[buf_offset++];
1.2 noro 930: while (isdigit(c)) {
931: result *= 10;
932: result += c - '0';
1.3 noro 933: c = stat_buf[buf_offset++];
1.2 noro 934: }
1.3 noro 935: close(f);
1.2 noro 936: if (result < 0x10000000) ABORT("Absurd stack bottom value");
937: return (ptr_t)result;
938: }
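/* Cross-check sketch (illustrative, not collector code): proc(5) */
/* documents field 28 of /proc/self/stat as startstack, which is */
/* what the loop above extracts. A debug build could compare the */
/* result against a stdio version like this one; the collector */
/* itself avoids stdio here because it may not allocate. Both */
/* parsers assume the comm field contains no embedded spaces. */
#if 0
#include <stdio.h>

static unsigned long example_startstack(void)
{
    FILE *f = fopen("/proc/self/stat", "r");
    unsigned long result = 0;
    int i;

    if (NULL == f) return 0;
    for (i = 0; i < STAT_SKIP; i++) (void)fscanf(f, "%*s");
    (void)fscanf(f, "%lu", &result);
    fclose(f);
    return result;
}
#endif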
939:
940: #endif /* LINUX_STACKBOTTOM */
1.1 noro 941:
1.4 noro 942: #ifdef FREEBSD_STACKBOTTOM
943:
944: /* This uses an undocumented sysctl call, but at least one expert */
945: /* believes it will stay. */
946:
947: #include <unistd.h>
948: #include <sys/types.h>
949: #include <sys/sysctl.h>
950:
951: ptr_t GC_freebsd_stack_base(void)
952: {
1.6 noro 953: int nm[2] = {CTL_KERN, KERN_USRSTACK};
954: ptr_t base;
955: size_t len = sizeof(ptr_t);
956: int r = sysctl(nm, 2, &base, &len, NULL, 0);
1.4 noro 957:
958: if (r) ABORT("Error getting stack base");
959:
1.6 noro 960: return base;
1.4 noro 961: }
962:
963: #endif /* FREEBSD_STACKBOTTOM */
964:
965: #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
966: && !defined(MSWINCE) && !defined(OS2)
967:
1.1 noro 968: ptr_t GC_get_stack_base()
969: {
1.7 noro 970: # if defined(HEURISTIC1) || defined(HEURISTIC2) || \
971: defined(LINUX_STACKBOTTOM) || defined(FREEBSD_STACKBOTTOM)
1.1 noro 972: word dummy;
973: ptr_t result;
1.7 noro 974: # endif
1.1 noro 975:
976: # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
977:
978: # ifdef STACKBOTTOM
979: return(STACKBOTTOM);
980: # else
981: # ifdef HEURISTIC1
982: # ifdef STACK_GROWS_DOWN
983: result = (ptr_t)((((word)(&dummy))
984: + STACKBOTTOM_ALIGNMENT_M1)
985: & ~STACKBOTTOM_ALIGNMENT_M1);
986: # else
987: result = (ptr_t)(((word)(&dummy))
988: & ~STACKBOTTOM_ALIGNMENT_M1);
989: # endif
990: # endif /* HEURISTIC1 */
1.2 noro 991: # ifdef LINUX_STACKBOTTOM
992: result = GC_linux_stack_base();
993: # endif
1.4 noro 994: # ifdef FREEBSD_STACKBOTTOM
995: result = GC_freebsd_stack_base();
996: # endif
1.1 noro 997: # ifdef HEURISTIC2
998: # ifdef STACK_GROWS_DOWN
999: result = GC_find_limit((ptr_t)(&dummy), TRUE);
1000: # ifdef HEURISTIC2_LIMIT
1001: if (result > HEURISTIC2_LIMIT
1002: && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
1003: result = HEURISTIC2_LIMIT;
1004: }
1005: # endif
1006: # else
1007: result = GC_find_limit((ptr_t)(&dummy), FALSE);
1008: # ifdef HEURISTIC2_LIMIT
1009: if (result < HEURISTIC2_LIMIT
1010: && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
1011: result = HEURISTIC2_LIMIT;
1012: }
1013: # endif
1014: # endif
1015:
1016: # endif /* HEURISTIC2 */
1017: # ifdef STACK_GROWS_DOWN
1018: if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
1019: # endif
1020: return(result);
1021: # endif /* STACKBOTTOM */
1022: }
1023:
1.4 noro 1024: # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS */
1.1 noro 1025:
1026: /*
1027: * Register static data segment(s) as roots.
1028: * If more data segments are added later, then they need to be registered
1029: * at that point (as we do with SunOS dynamic loading),
1030: * or GC_mark_roots needs to check for them (as we do with PCR).
1031: * Called with allocator lock held.
1032: */
1033:
1034: # ifdef OS2
1035:
1036: void GC_register_data_segments()
1037: {
1038: PTIB ptib;
1039: PPIB ppib;
1040: HMODULE module_handle;
1041: # define PBUFSIZ 512
1042: UCHAR path[PBUFSIZ];
1043: FILE * myexefile;
1044: struct exe_hdr hdrdos; /* MSDOS header. */
1045: struct e32_exe hdr386; /* Real header for my executable */
1046: struct o32_obj seg; /* Current segment */
1047: int nsegs;
1048:
1049:
1050: if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
1051: GC_err_printf0("DosGetInfoBlocks failed\n");
1052: ABORT("DosGetInfoBlocks failed\n");
1053: }
1054: module_handle = ppib -> pib_hmte;
1055: if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
1056: GC_err_printf0("DosQueryModuleName failed\n");
1057: ABORT("DosGetInfoBlocks failed\n");
1058: }
1059: myexefile = fopen(path, "rb");
1060: if (myexefile == 0) {
1061: GC_err_puts("Couldn't open executable ");
1062: GC_err_puts(path); GC_err_puts("\n");
1063: ABORT("Failed to open executable\n");
1064: }
1065: if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
1066: GC_err_puts("Couldn't read MSDOS header from ");
1067: GC_err_puts(path); GC_err_puts("\n");
1068: ABORT("Couldn't read MSDOS header");
1069: }
1070: if (E_MAGIC(hdrdos) != EMAGIC) {
1071: GC_err_puts("Executable has wrong DOS magic number: ");
1072: GC_err_puts(path); GC_err_puts("\n");
1073: ABORT("Bad DOS magic number");
1074: }
1075: if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
1076: GC_err_puts("Seek to new header failed in ");
1077: GC_err_puts(path); GC_err_puts("\n");
1078: ABORT("Bad DOS magic number");
1079: }
1080: if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
1081: GC_err_puts("Couldn't read MSDOS header from ");
1082: GC_err_puts(path); GC_err_puts("\n");
1083: ABORT("Couldn't read OS/2 header");
1084: }
1085: if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
1086: GC_err_puts("Executable has wrong OS/2 magic number:");
1087: GC_err_puts(path); GC_err_puts("\n");
1088: ABORT("Bad OS/2 magic number");
1089: }
1090: if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
1091: GC_err_puts("Executable %s has wrong byte order: ");
1092: GC_err_puts(path); GC_err_puts("\n");
1093: ABORT("Bad byte order");
1094: }
1095: if ( E32_CPU(hdr386) == E32CPU286) {
1096: GC_err_puts("GC can't handle 80286 executables: ");
1097: GC_err_puts(path); GC_err_puts("\n");
1098: EXIT();
1099: }
1100: if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
1101: SEEK_SET) != 0) {
1102: GC_err_puts("Seek to object table failed: ");
1103: GC_err_puts(path); GC_err_puts("\n");
1104: ABORT("Seek to object table failed");
1105: }
1106: for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
1107: int flags;
1108: if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
1109: GC_err_puts("Couldn't read obj table entry from ");
1110: GC_err_puts(path); GC_err_puts("\n");
1111: ABORT("Couldn't read obj table entry");
1112: }
1113: flags = O32_FLAGS(seg);
1114: if (!(flags & OBJWRITE)) continue;
1115: if (!(flags & OBJREAD)) continue;
1116: if (flags & OBJINVALID) {
1117: GC_err_printf0("Object with invalid pages?\n");
1118: continue;
1119: }
1120: GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
1121: }
1122: }
1123:
1.4 noro 1124: # else /* !OS2 */
1125:
1126: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 1127:
1128: # ifdef MSWIN32
1129: /* Unfortunately, we have to handle win32s very differently from NT, */
1130: /* since VirtualQuery has very different semantics. In particular, */
1131: /* under win32s a VirtualQuery call on an unmapped page returns an */
1.6 noro 1132: /* invalid result. Under NT, GC_register_data_segments is a noop and */
1.1 noro 1133: /* all real work is done by GC_register_dynamic_libraries. Under */
1134: /* win32s, we cannot find the data segments associated with dll's. */
1.7 noro 1135: /* We register the main data segment here. */
1.6 noro 1136: GC_bool GC_no_win32_dlls = FALSE;
1.7 noro 1137: /* This used to be set for gcc, to avoid dealing with */
1138: /* the structured exception handling issues. But we now have */
1139: /* assembly code to do that right. */
1.1 noro 1140:
1141: void GC_init_win32()
1142: {
1.6 noro 1143: /* if we're running under win32s, assume that no DLLs will be loaded */
1144: DWORD v = GetVersion();
1145: GC_no_win32_dlls |= ((v & 0x80000000) && (v & 0xff) <= 3);
1.1 noro 1146: }
1.4 noro 1147:
1.1 noro 1148: /* Return the smallest address a such that VirtualQuery */
1149: /* returns correct results for all addresses between a and start. */
1150: /* Assumes VirtualQuery returns correct information for start. */
1151: ptr_t GC_least_described_address(ptr_t start)
1152: {
1153: MEMORY_BASIC_INFORMATION buf;
1154: DWORD result;
1155: LPVOID limit;
1156: ptr_t p;
1157: LPVOID q;
1158:
1.4 noro 1159: limit = GC_sysinfo.lpMinimumApplicationAddress;
1.1 noro 1160: p = (ptr_t)((word)start & ~(GC_page_size - 1));
1161: for (;;) {
1162: q = (LPVOID)(p - GC_page_size);
1163: if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
1164: result = VirtualQuery(q, &buf, sizeof(buf));
1165: if (result != sizeof(buf) || buf.AllocationBase == 0) break;
1166: p = (ptr_t)(buf.AllocationBase);
1167: }
1168: return(p);
1169: }
1.4 noro 1170: # endif
1.7 noro 1171:
1172: # ifndef REDIRECT_MALLOC
1173: /* We maintain a linked list of AllocationBase values that we know */
1174: /* correspond to malloc heap sections. Currently this is only called */
1175: /* during a GC. But there is some hope that for long running */
1176: /* programs we will eventually see most heap sections. */
1177:
1178: /* In the long run, it would be more reliable to occasionally walk */
1179: /* the malloc heap with HeapWalk on the default heap. But that */
1180: /* apparently works only for NT-based Windows. */
1181:
1182: /* In the long run, a better data structure would also be nice ... */
1183: struct GC_malloc_heap_list {
1184: void * allocation_base;
1185: struct GC_malloc_heap_list *next;
1186: } *GC_malloc_heap_l = 0;
1187:
1188: /* Is p the base of one of the malloc heap sections we already know */
1189: /* about? */
1190: GC_bool GC_is_malloc_heap_base(ptr_t p)
1191: {
1192: struct GC_malloc_heap_list *q = GC_malloc_heap_l;
1193:
1194: while (0 != q) {
1195: if (q -> allocation_base == p) return TRUE;
1196: q = q -> next;
1197: }
1198: return FALSE;
1199: }
1200:
1201: void *GC_get_allocation_base(void *p)
1202: {
1203: MEMORY_BASIC_INFORMATION buf;
1204: DWORD result = VirtualQuery(p, &buf, sizeof(buf));
1205: if (result != sizeof(buf)) {
1206: ABORT("Weird VirtualQuery result");
1207: }
1208: return buf.AllocationBase;
1209: }
1210:
1211: size_t GC_max_root_size = 100000; /* Approx. largest root size. */
1212:
1213: void GC_add_current_malloc_heap()
1214: {
1215: struct GC_malloc_heap_list *new_l =
1216: malloc(sizeof(struct GC_malloc_heap_list));
1217: void * candidate;
1218: if (new_l == 0) return;
1219: candidate = GC_get_allocation_base(new_l); /* only after the 0 check */
1220: if (GC_is_malloc_heap_base(candidate)) {
1221: /* Try a little harder to find malloc heap. */
1222: size_t req_size = 10000;
1223: do {
1224: void *p = malloc(req_size);
1225: if (0 == p) { free(new_l); return; }
1226: candidate = GC_get_allocation_base(p);
1227: free(p);
1228: req_size *= 2;
1229: } while (GC_is_malloc_heap_base(candidate)
1230: && req_size < GC_max_root_size/10 && req_size < 500000);
1231: if (GC_is_malloc_heap_base(candidate)) {
1232: free(new_l); return;
1233: }
1234: }
1235: # ifdef CONDPRINT
1236: if (GC_print_stats)
1237: GC_printf1("Found new system malloc AllocationBase at 0x%lx\n",
1238: candidate);
1239: # endif
1240: new_l -> allocation_base = candidate;
1241: new_l -> next = GC_malloc_heap_l;
1242: GC_malloc_heap_l = new_l;
1243: }
1244: # endif /* REDIRECT_MALLOC */
1.1 noro 1245:
1246: /* Is p the start of either the malloc heap, or of one of our */
1247: /* heap sections? */
1248: GC_bool GC_is_heap_base (ptr_t p)
1249: {
1250:
1.7 noro 1251: unsigned i;
1.1 noro 1252:
1253: # ifndef REDIRECT_MALLOC
1.7 noro 1254: static word last_gc_no = -1;
1.1 noro 1255:
1.7 noro 1256: if (last_gc_no != GC_gc_no) {
1257: GC_add_current_malloc_heap();
1258: last_gc_no = GC_gc_no;
1.1 noro 1259: }
1.7 noro 1260: if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
1261: if (GC_is_malloc_heap_base(p)) return TRUE;
1.1 noro 1262: # endif
1263: for (i = 0; i < GC_n_heap_bases; i++) {
1.7 noro 1264: if (GC_heap_bases[i] == p) return TRUE;
1.1 noro 1265: }
1.7 noro 1266: return FALSE ;
1.1 noro 1267: }
1.4 noro 1268:
1269: # ifdef MSWIN32
1.1 noro 1270: void GC_register_root_section(ptr_t static_root)
1271: {
1272: MEMORY_BASIC_INFORMATION buf;
1273: DWORD result;
1274: DWORD protect;
1275: LPVOID p;
1276: char * base;
1277: char * limit, * new_limit;
1278:
1.6 noro 1279: if (!GC_no_win32_dlls) return;
1.1 noro 1280: p = base = limit = GC_least_described_address(static_root);
1.4 noro 1281: while (p < GC_sysinfo.lpMaximumApplicationAddress) {
1.1 noro 1282: result = VirtualQuery(p, &buf, sizeof(buf));
1283: if (result != sizeof(buf) || buf.AllocationBase == 0
1284: || GC_is_heap_base(buf.AllocationBase)) break;
1285: new_limit = (char *)p + buf.RegionSize;
1286: protect = buf.Protect;
1287: if (buf.State == MEM_COMMIT
1288: && is_writable(protect)) {
1289: if ((char *)p == limit) {
1290: limit = new_limit;
1291: } else {
1292: if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1293: base = p;
1294: limit = new_limit;
1295: }
1296: }
1297: if (p > (LPVOID)new_limit /* overflow */) break;
1298: p = (LPVOID)new_limit;
1299: }
1300: if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1301: }
1.4 noro 1302: #endif
1.1 noro 1303:
1304: void GC_register_data_segments()
1305: {
1.4 noro 1306: # ifdef MSWIN32
1.1 noro 1307: static char dummy;
1308: GC_register_root_section((ptr_t)(&dummy));
1309: # endif
1310: }
1311:
1.4 noro 1312: # else /* !OS2 && !Windows */
1.1 noro 1313:
1.4 noro 1314: # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1315: || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1.7 noro 1316: ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
1.1 noro 1317: int max_page_size;
1318: int * etext_addr;
1319: {
1320: word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1321: & ~(sizeof(word) - 1);
1322: /* etext rounded to word boundary */
1323: word next_page = ((text_end + (word)max_page_size - 1)
1324: & ~((word)max_page_size - 1));
1325: word page_offset = (text_end & ((word)max_page_size - 1));
1326: VOLATILE char * result = (char *)(next_page + page_offset);
1327: /* Note that this isn't equivalent to just adding */
1328: /* max_page_size to &etext if &etext is at a page boundary */
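/* Worked example (illustrative): with max_page_size == 4096 and */
/* etext_addr == 0x2c040, text_end is 0x2c040, next_page is */
/* 0x2d000 and page_offset is 0x40, so the probe starts at */
/* 0x2d040, i.e. at the same offset within the following page. */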
1329:
1330: GC_setup_temporary_fault_handler();
1331: if (setjmp(GC_jmp_buf) == 0) {
1332: /* Try writing to the address. */
1333: *result = *result;
1334: GC_reset_fault_handler();
1335: } else {
1336: GC_reset_fault_handler();
1337: /* We got here via a longjmp. The address is not readable. */
1338: /* This is known to happen under Solaris 2.4 + gcc, which place */
1339: /* string constants in the text segment, but after etext. */
1340: /* Use plan B. Note that we now know there is a gap between */
1341: /* text and data segments, so plan A bought us something. */
1.7 noro 1342: result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
1343: }
1344: return((ptr_t)result);
1345: }
1346: # endif
1347:
1.8 ! saito 1348: # if defined(FREEBSD) && ( defined(I386) || defined(X86_64) ) && !defined(PCR)
1.7 noro 1350: /* It's unclear whether this should be identical to the above, or */
1351: /* whether it should apply to non-X86 architectures. */
1352: /* For now we don't assume that there is always an empty page after */
1353: /* etext. But in some cases there actually seems to be slightly more. */
1354: /* This also deals with holes between read-only data and writable data. */
1355: ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
1356: int max_page_size;
1357: int * etext_addr;
1358: {
1359: word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1360: & ~(sizeof(word) - 1);
1361: /* etext rounded to word boundary */
1362: VOLATILE word next_page = (text_end + (word)max_page_size - 1)
1363: & ~((word)max_page_size - 1);
1364: VOLATILE ptr_t result = (ptr_t)text_end;
1365: GC_setup_temporary_fault_handler();
1366: if (setjmp(GC_jmp_buf) == 0) {
1367: /* Try reading at the address. */
1368: /* This should happen before there is another thread. */
1369: for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
1370: *(VOLATILE char *)next_page;
1371: GC_reset_fault_handler();
1372: } else {
1373: GC_reset_fault_handler();
1374: /* As above, we go to plan B */
1375: result = GC_find_limit((ptr_t)(DATAEND), FALSE);
1.1 noro 1376: }
1.7 noro 1377: return(result);
1.1 noro 1378: }
1.7 noro 1379:
1.1 noro 1380: # endif
1381:
1382:
1.4 noro 1383: #ifdef AMIGA
1384:
1385: # define GC_AMIGA_DS
1386: # include "AmigaOS.c"
1387: # undef GC_AMIGA_DS
1388:
1389: #else /* !OS2 && !Windows && !AMIGA */
1390:
1.1 noro 1391: void GC_register_data_segments()
1392: {
1.7 noro 1393: # if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
1.6 noro 1394: # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
1.1 noro 1395: /* As of Solaris 2.3, the Solaris threads implementation */
1396: /* allocates the data structure for the initial thread with */
1397: /* sbrk at process startup. It needs to be scanned, so that */
1398: /* we don't lose some malloc allocated data structures */
1399: /* hanging from it. We're on thin ice here ... */
1400: extern caddr_t sbrk();
1401:
1402: GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1403: # else
1404: GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
1.6 noro 1405: # if defined(DATASTART2)
1406: GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
1407: # endif
1.1 noro 1408: # endif
1409: # endif
1410: # if defined(MACOS)
1411: {
1412: # if defined(THINK_C)
1413: extern void* GC_MacGetDataStart(void);
1414: /* globals begin above stack and end at a5. */
1415: GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1416: (ptr_t)LMGetCurrentA5(), FALSE);
1417: # else
1418: # if defined(__MWERKS__)
1419: # if !__POWERPC__
1420: extern void* GC_MacGetDataStart(void);
1421: /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1422: # if __option(far_data)
1423: extern void* GC_MacGetDataEnd(void);
1424: # endif
1425: /* globals begin above stack and end at a5. */
1426: GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1427: (ptr_t)LMGetCurrentA5(), FALSE);
1428: /* MATTHEW: Handle Far Globals */
1429: # if __option(far_data)
1430: /* Far globals follow the QD globals: */
1431: GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1432: (ptr_t)GC_MacGetDataEnd(), FALSE);
1433: # endif
1434: # else
1435: extern char __data_start__[], __data_end__[];
1436: GC_add_roots_inner((ptr_t)&__data_start__,
1437: (ptr_t)&__data_end__, FALSE);
1438: # endif /* __POWERPC__ */
1439: # endif /* __MWERKS__ */
1440: # endif /* !THINK_C */
1441: }
1442: # endif /* MACOS */
1443:
1444: /* Dynamic libraries are added at every collection, since they may */
1445: /* change. */
1446: }
1447:
1448: # endif /* ! AMIGA */
1.4 noro 1449: # endif /* ! MSWIN32 && ! MSWINCE*/
1.1 noro 1450: # endif /* ! OS2 */
1451:
1452: /*
1453: * Auxiliary routines for obtaining memory from OS.
1454: */
1.4 noro 1455:
1.1 noro 1456: # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1.4 noro 1457: && !defined(MSWIN32) && !defined(MSWINCE) \
1458: && !defined(MACOS) && !defined(DOS4GW)
1.1 noro 1459:
1460: # ifdef SUNOS4
1461: extern caddr_t sbrk();
1462: # endif
1463: # ifdef __STDC__
1464: # define SBRK_ARG_T ptrdiff_t
1465: # else
1466: # define SBRK_ARG_T int
1467: # endif
1468:
1.4 noro 1469:
1.1 noro 1470: # ifdef RS6000
1471: /* The compiler seems to generate speculative reads one past the end of */
1472: /* an allocated object. Hence we need to make sure that the page */
1473: /* following the last heap page is also mapped. */
1474: ptr_t GC_unix_get_mem(bytes)
1475: word bytes;
1476: {
1477: caddr_t cur_brk = (caddr_t)sbrk(0);
1478: caddr_t result;
1479: SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1480: static caddr_t my_brk_val = 0;
1481:
1482: if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1483: if (lsbs != 0) {
1484: if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1485: }
1486: if (cur_brk == my_brk_val) {
1487: /* Use the extra block we allocated last time. */
1488: result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1489: if (result == (caddr_t)(-1)) return(0);
1490: result -= GC_page_size;
1491: } else {
1492: result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1493: if (result == (caddr_t)(-1)) return(0);
1494: }
1495: my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1496: return((ptr_t)result);
1497: }
1498:
1499: #else /* Not RS6000 */
1500:
1501: #if defined(USE_MMAP)
1.4 noro 1502: /* Tested only under Linux, IRIX5 and Solaris 2 */
1.1 noro 1503:
1504: #ifdef USE_MMAP_FIXED
1505: # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1506: /* Seems to yield better performance on Solaris 2, but can */
1507: /* be unreliable if something is already mapped at the address. */
1508: #else
1509: # define GC_MMAP_FLAGS MAP_PRIVATE
1510: #endif
1511:
1.4 noro 1512: #ifndef HEAP_START
1513: # define HEAP_START 0
1514: #endif
1515:
1.1 noro 1516: ptr_t GC_unix_get_mem(bytes)
1517: word bytes;
1518: {
1519: void *result;
1520: static ptr_t last_addr = HEAP_START;
1521:
1.7 noro 1522: # ifndef USE_MMAP_ANON
1523: static GC_bool initialized = FALSE;
1524: static int fd;
1525:
1526: if (!initialized) {
1527: fd = open("/dev/zero", O_RDONLY);
1528: fcntl(fd, F_SETFD, FD_CLOEXEC);
1529: initialized = TRUE;
1530: }
1531: # endif
1532:
1.1 noro 1533: if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1.7 noro 1534: # ifdef USE_MMAP_ANON
1535: result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1536: GC_MMAP_FLAGS | MAP_ANON, -1, 0/* offset */);
1537: # else
1538: result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1539: GC_MMAP_FLAGS, fd, 0/* offset */);
1540: # endif
1.1 noro 1541: if (result == MAP_FAILED) return(0);
1542: last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1543: last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1.4 noro 1544: # if !defined(LINUX)
1545: if (last_addr == 0) {
1546: /* Oops. We got the end of the address space. This isn't */
1547: /* usable by arbitrary C code, since one-past-end pointers */
1548: /* don't work, so we discard it and try again. */
1549: munmap(result, (size_t)(-GC_page_size) - (size_t)result);
1550: /* Leave last page mapped, so we can't repeat. */
1551: return GC_unix_get_mem(bytes);
1552: }
1553: # else
1554: GC_ASSERT(last_addr != 0);
1555: # endif
1.1 noro 1556: return((ptr_t)result);
1557: }
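/* Worked example of the rounding above (illustrative): with 4K */
/* pages, result == 0x40000000 and bytes == 0x5000 leave */
/* last_addr == 0x40005000, so the hint passed to the next mmap */
/* never overlaps the block just returned. */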
1558:
1559: #else /* Not RS6000, not USE_MMAP */
1560: ptr_t GC_unix_get_mem(bytes)
1561: word bytes;
1562: {
1563: ptr_t result;
1564: # ifdef IRIX5
1565: /* Bare sbrk isn't thread safe. Play by malloc rules. */
1566: /* The equivalent may be needed on other systems as well. */
1567: __LOCK_MALLOC();
1568: # endif
1569: {
1570: ptr_t cur_brk = (ptr_t)sbrk(0);
1571: SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1572:
1573: if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1574: if (lsbs != 0) {
1575: if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1576: }
1577: result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1578: if (result == (ptr_t)(-1)) result = 0;
1579: }
1580: # ifdef IRIX5
1581: __UNLOCK_MALLOC();
1582: # endif
1583: return(result);
1584: }
1585:
1586: #endif /* Not USE_MMAP */
1587: #endif /* Not RS6000 */
1588:
1589: # endif /* UN*X */
1590:
1591: # ifdef OS2
1592:
1593: void * os2_alloc(size_t bytes)
1594: {
1595: void * result;
1596:
1597: if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1598: PAG_WRITE | PAG_COMMIT)
1599: != NO_ERROR) {
1600: return(0);
1601: }
1602: if (result == 0) return(os2_alloc(bytes));
1603: return(result);
1604: }
1605:
1606: # endif /* OS2 */
1607:
1608:
1.4 noro 1609: # if defined(MSWIN32) || defined(MSWINCE)
1610: SYSTEM_INFO GC_sysinfo;
1611: # endif
1612:
1.6 noro 1613: # ifdef MSWIN32
1614:
1615: # ifdef USE_GLOBAL_ALLOC
1616: # define GLOBAL_ALLOC_TEST 1
1617: # else
1618: # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
1619: # endif
1.4 noro 1620:
1.1 noro 1621: word GC_n_heap_bases = 0;
1622:
1623: ptr_t GC_win32_get_mem(bytes)
1624: word bytes;
1625: {
1626: ptr_t result;
1.4 noro 1627:
1.6 noro 1628: if (GLOBAL_ALLOC_TEST) {
1.1 noro 1629: /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1630: /* There are also unconfirmed rumors of other */
1631: /* problems, so we dodge the issue. */
1632: result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1633: result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1634: } else {
1.6 noro 1635: /* VirtualProtect only works on regions returned by a */
1636: /* single VirtualAlloc call. Thus we allocate one */
1637: /* extra page, which will prevent merging of blocks */
1638: /* in separate regions, and eliminate any temptation */
1639: /* to call VirtualProtect on a range spanning regions. */
1640: /* This wastes a small amount of memory, and risks */
1641: /* increased fragmentation. But better alternatives */
1642: /* would require effort. */
1643: result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
1.1 noro 1644: MEM_COMMIT | MEM_RESERVE,
1645: PAGE_EXECUTE_READWRITE);
1646: }
1647: if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1648: /* If I read the documentation correctly, this can */
1649: /* only happen if HBLKSIZE > 64k or not a power of 2. */
1650: if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1651: GC_heap_bases[GC_n_heap_bases++] = result;
1652: return(result);
1653: }
1654:
1655: void GC_win32_free_heap ()
1656: {
1.6 noro 1657: if (GC_no_win32_dlls) {
1.1 noro 1658: while (GC_n_heap_bases > 0) {
1659: GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1660: GC_heap_bases[GC_n_heap_bases] = 0;
1661: }
1662: }
1663: }
1.4 noro 1664: # endif
1665:
1666: #ifdef AMIGA
1667: # define GC_AMIGA_AM
1668: # include "AmigaOS.c"
1669: # undef GC_AMIGA_AM
1670: #endif
1.1 noro 1671:
1672:
1.4 noro 1673: # ifdef MSWINCE
1674: word GC_n_heap_bases = 0;
1675:
1676: ptr_t GC_wince_get_mem(bytes)
1677: word bytes;
1678: {
1679: ptr_t result;
1680: word i;
1681:
1682: /* Round up allocation size to multiple of page size */
1683: bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
1684:
1685: /* Try to find reserved, uncommitted pages */
1686: for (i = 0; i < GC_n_heap_bases; i++) {
1687: if (((word)(-(signed_word)GC_heap_lengths[i])
1688: & (GC_sysinfo.dwAllocationGranularity-1))
1689: >= bytes) {
1690: result = GC_heap_bases[i] + GC_heap_lengths[i];
1691: break;
1692: }
1693: }
1694:
1695: if (i == GC_n_heap_bases) {
1696: /* Reserve more pages */
1697: word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
1698: & ~(GC_sysinfo.dwAllocationGranularity-1);
1.6 noro 1699: /* If we ever support MPROTECT_VDB here, we will probably need to */
1700: /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
1701: /* never spans regions. It seems to be OK for a VirtualFree argument */
1702: /* to span regions, so we should be OK for now. */
1.4 noro 1703: result = (ptr_t) VirtualAlloc(NULL, res_bytes,
1704: MEM_RESERVE | MEM_TOP_DOWN,
1705: PAGE_EXECUTE_READWRITE);
1706: if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1707: /* If I read the documentation correctly, this can */
1708: /* only happen if HBLKSIZE > 64k or not a power of 2. */
1709: if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1710: GC_heap_bases[GC_n_heap_bases] = result;
1711: GC_heap_lengths[GC_n_heap_bases] = 0;
1712: GC_n_heap_bases++;
1713: }
1714:
1715: /* Commit pages */
1716: result = (ptr_t) VirtualAlloc(result, bytes,
1717: MEM_COMMIT,
1718: PAGE_EXECUTE_READWRITE);
1719: if (result != NULL) {
1720: if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1721: GC_heap_lengths[i] += bytes;
1722: }
1723:
1724: return(result);
1725: }
1.1 noro 1726: # endif
1727:
1728: #ifdef USE_MUNMAP
1729:
1.4 noro 1730: /* For now, this only works on Win32/WinCE and some Unix-like */
1731: /* systems. If you have something else, don't define */
1732: /* USE_MUNMAP. */
1.1 noro 1733: /* We assume ANSI C to support this feature. */
1.4 noro 1734:
1735: #if !defined(MSWIN32) && !defined(MSWINCE)
1736:
1.1 noro 1737: #include <unistd.h>
1738: #include <sys/mman.h>
1739: #include <sys/stat.h>
1740: #include <sys/types.h>
1.4 noro 1741:
1742: #endif
1.1 noro 1743:
1744: /* Compute a page aligned starting address for the unmap */
1745: /* operation on a block of size bytes starting at start. */
1746: /* Return 0 if the block is too small to make this feasible. */
1747: ptr_t GC_unmap_start(ptr_t start, word bytes)
1748: {
1749: ptr_t result = start;
1750: /* Round start to next page boundary. */
1751: result += GC_page_size - 1;
1752: result = (ptr_t)((word)result & ~(GC_page_size - 1));
1753: if (result + GC_page_size > start + bytes) return 0;
1754: return result;
1755: }
1756:
1757: /* Compute end address for an unmap operation on the indicated */
1758: /* block. */
1759: ptr_t GC_unmap_end(ptr_t start, word bytes)
1760: {
1761: ptr_t end_addr = start + bytes;
1762: end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1763: return end_addr;
1764: }
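
/* A worked example (illustrative only, assuming GC_page_size == 4096):  */
/* for start == 0x10100 and bytes == 0x3000, the first page boundary at  */
/* or above start is 0x11000, and 0x11000 + 0x1000 <= 0x13100, so	  */
/* GC_unmap_start returns 0x11000; GC_unmap_end rounds start + bytes	  */
/* down and returns 0x13000. The pages in [0x11000, 0x13000) can then	  */
/* be unmapped without touching either partial page.			  */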
1765:
1.4 noro 1766: /* Under Win32/WinCE we commit (map) and decommit (unmap) */
1767: /* memory using VirtualAlloc and VirtualFree. These functions */
1768: /* work on individual allocations of virtual memory, made */
1769: /* previously using VirtualAlloc with the MEM_RESERVE flag. */
1770: /* The ranges we need to (de)commit may span several of these */
1771: /* allocations; therefore we use VirtualQuery to check */
1772: /* allocation lengths, and split up the range as necessary. */
1773:
1.1 noro 1774: /* We assume that GC_remap is called on exactly the same range */
1775: /* as a previous call to GC_unmap. It is safe to consistently */
1776: /* round the endpoints in both places. */
1777: void GC_unmap(ptr_t start, word bytes)
1778: {
1779: ptr_t start_addr = GC_unmap_start(start, bytes);
1780: ptr_t end_addr = GC_unmap_end(start, bytes);
1781: word len = end_addr - start_addr;
1782: if (0 == start_addr) return;
1.4 noro 1783: # if defined(MSWIN32) || defined(MSWINCE)
1784: while (len != 0) {
1785: MEMORY_BASIC_INFORMATION mem_info;
1786: GC_word free_len;
1787: if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1788: != sizeof(mem_info))
1789: ABORT("Weird VirtualQuery result");
1790: free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1791: if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1792: ABORT("VirtualFree failed");
1793: GC_unmapped_bytes += free_len;
1794: start_addr += free_len;
1795: len -= free_len;
1796: }
1797: # else
1798: if (munmap(start_addr, len) != 0) ABORT("munmap failed");
1799: GC_unmapped_bytes += len;
1800: # endif
1.1 noro 1801: }
1802:
1803:
1804: void GC_remap(ptr_t start, word bytes)
1805: {
1806: static int zero_descr = -1;
1807: ptr_t start_addr = GC_unmap_start(start, bytes);
1808: ptr_t end_addr = GC_unmap_end(start, bytes);
1809: word len = end_addr - start_addr;
1810: ptr_t result;
1811:
1.4 noro 1812: # if defined(MSWIN32) || defined(MSWINCE)
1813: if (0 == start_addr) return;
1814: while (len != 0) {
1815: MEMORY_BASIC_INFORMATION mem_info;
1816: GC_word alloc_len;
1817: if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1818: != sizeof(mem_info))
1819: ABORT("Weird VirtualQuery result");
1820: alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1821: result = VirtualAlloc(start_addr, alloc_len,
1822: MEM_COMMIT,
1823: PAGE_EXECUTE_READWRITE);
1824: if (result != start_addr) {
1825: ABORT("VirtualAlloc remapping failed");
1826: }
1827: GC_unmapped_bytes -= alloc_len;
1828: start_addr += alloc_len;
1829: len -= alloc_len;
1830: }
1831: # else
1832:     if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
    if (-1 == zero_descr) ABORT("Failed to open /dev/zero");
1.7 noro 1833: fcntl(zero_descr, F_SETFD, FD_CLOEXEC);
1.4 noro 1834: if (0 == start_addr) return;
1835: result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1836: MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
1837: if (result != start_addr) {
1838: ABORT("mmap remapping failed");
1839: }
1840: GC_unmapped_bytes -= len;
1841: # endif
1.1 noro 1842: }
1843:
1844: /* Two adjacent blocks have already been unmapped and are about to */
1845: /* be merged. Unmap the whole block. This typically requires */
1846: /* that we unmap a small section in the middle that was not previously */
1847: /* unmapped due to alignment constraints. */
1848: void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1849: {
1850: ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1851: ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1852: ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1853: ptr_t end2_addr = GC_unmap_end(start2, bytes2);
1854: ptr_t start_addr = end1_addr;
1855: ptr_t end_addr = start2_addr;
1856: word len;
1857: GC_ASSERT(start1 + bytes1 == start2);
1858: if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1859: if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1860: if (0 == start_addr) return;
1861: len = end_addr - start_addr;
1.4 noro 1862: # if defined(MSWIN32) || defined(MSWINCE)
1863: while (len != 0) {
1864: MEMORY_BASIC_INFORMATION mem_info;
1865: GC_word free_len;
1866: if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1867: != sizeof(mem_info))
1868: ABORT("Weird VirtualQuery result");
1869: free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1870: if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1871: ABORT("VirtualFree failed");
1872: GC_unmapped_bytes += free_len;
1873: start_addr += free_len;
1874: len -= free_len;
1875: }
1876: # else
1877: if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1878: GC_unmapped_bytes += len;
1879: # endif
1.1 noro 1880: }
1881:
1882: #endif /* USE_MUNMAP */
1883:
1884: /* Routine for pushing any additional roots. In THREADS */
1885: /* environment, this is also responsible for marking from */
1.4 noro 1886: /* thread stacks. */
1.1 noro 1887: #ifndef THREADS
1888: void (*GC_push_other_roots)() = 0;
1889: #else /* THREADS */
1890:
1891: # ifdef PCR
1892: PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1893: {
1894: struct PCR_ThCtl_TInfoRep info;
1895: PCR_ERes result;
1896:
1897: info.ti_stkLow = info.ti_stkHi = 0;
1898: result = PCR_ThCtl_GetInfo(t, &info);
1899: GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1900: return(result);
1901: }
1902:
1903: /* Push the contents of an old object. We treat this as stack */
1904: /* data only because that makes it robust against mark stack	*/
1905: /* overflow. */
1906: PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1907: {
1908: GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1909: return(PCR_ERes_okay);
1910: }
1911:
1912:
1.4 noro 1913: void GC_default_push_other_roots GC_PROTO((void))
1.1 noro 1914: {
1915: /* Traverse data allocated by previous memory managers. */
1916: {
1917: extern struct PCR_MM_ProcsRep * GC_old_allocator;
1918:
1919: if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1920: GC_push_old_obj, 0)
1921: != PCR_ERes_okay) {
1922: ABORT("Old object enumeration failed");
1923: }
1924: }
1925: /* Traverse all thread stacks. */
1926: if (PCR_ERes_IsErr(
1927: PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1928: || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1929: ABORT("Thread stack marking failed\n");
1930: }
1931: }
1932:
1933: # endif /* PCR */
1934:
1935: # ifdef SRC_M3
1936:
1937: # ifdef ALL_INTERIOR_POINTERS
1938: --> misconfigured
1939: # endif
1940:
1.4 noro 1941: void GC_push_thread_structures GC_PROTO((void))
1942: {
1943: /* Not our responsibility. */
1944: }
1.1 noro 1945:
1946: extern void ThreadF__ProcessStacks();
1947:
1948: void GC_push_thread_stack(start, stop)
1949: word start, stop;
1950: {
1951: GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
1952: }
1953:
1954: /* Push routine with M3 specific calling convention. */
1955: GC_m3_push_root(dummy1, p, dummy2, dummy3)
1956: word *p;
1957: ptr_t dummy1, dummy2;
1958: int dummy3;
1959: {
1960: word q = *p;
1961:
1.4 noro 1962: GC_PUSH_ONE_STACK(q, p);
1.1 noro 1963: }
1964:
1965: /* M3 set equivalent to RTHeap.TracedRefTypes */
1966: typedef struct { int elts[1]; } RefTypeSet;
1967: RefTypeSet GC_TracedRefTypes = {{0x1}};
1968:
1.4 noro 1969: void GC_default_push_other_roots GC_PROTO((void))
1.1 noro 1970: {
1.4 noro 1971: /* Use the M3 provided routine for finding static roots. */
1972: /* This is a bit dubious, since it presumes no C roots. */
1973: /* We handle the collector roots explicitly in GC_push_roots */
1974: RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
1.1 noro 1975: if (GC_words_allocd > 0) {
1976: ThreadF__ProcessStacks(GC_push_thread_stack);
1977: }
1978: /* Otherwise this isn't absolutely necessary, and we have */
1979: /* startup ordering problems. */
1980: }
1981:
1982: # endif /* SRC_M3 */
1983:
1.6 noro 1984: # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
1985: defined(GC_WIN32_THREADS)
1.1 noro 1986:
1987: extern void GC_push_all_stacks();
1988:
1.4 noro 1989: void GC_default_push_other_roots GC_PROTO((void))
1.1 noro 1990: {
1991: GC_push_all_stacks();
1992: }
1993:
1.6 noro 1994: # endif /* GC_SOLARIS_THREADS || GC_PTHREADS || GC_WIN32_THREADS */
1.1 noro 1995:
1.4 noro 1996: void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
1.1 noro 1997:
1.6 noro 1998: #endif /* THREADS */
1.1 noro 1999:
2000: /*
2001: * Routines for accessing dirty bits on virtual pages.
1.4 noro 2002: * We plan to eventually implement four strategies for doing so:
1.1 noro 2003: * DEFAULT_VDB: A simple dummy implementation that treats every page
2004: * as possibly dirty. This makes incremental collection
2005: * useless, but the implementation is still correct.
2006:  * PCR_VDB:	Use PPCR's virtual dirty bit facility.
2007: * PROC_VDB: Use the /proc facility for reading dirty bits. Only
2008: * works under some SVR4 variants. Even then, it may be
2009: * too slow to be entirely satisfactory. Requires reading
2010: * dirty bits for entire address space. Implementations tend
2011: * to assume that the client is a (slow) debugger.
2012: * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
2013: * dirtied pages. The implementation (and implementability)
2014: * is highly system dependent. This usually fails when system
2015: * calls write to a protected page. We prevent the read system
2016:  *		call from doing so. It is the client's responsibility to
2017: * make sure that other system calls are similarly protected
2018: * or write only to the stack.
2019: */
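/* An illustrative client-side sketch (not part of the collector):	  */
/* whichever of the strategies above is compiled in, clients reach it	  */
/* through the same public entry point. GC_enable_incremental() and	  */
/* GC_MALLOC() are documented gc.h interfaces; the rest is hypothetical  */
/* client code.								  */
#if 0
#include "gc.h"

int main(void)
{
    int i;

    GC_enable_incremental();		/* start tracking dirty pages */
    for (i = 0; i < 100000; i++) {
	void **p = (void **)GC_MALLOC(64);	/* pointer-containing object */
	p[0] = p;	/* writes dirty the page; the collector notices them */
    }
    return 0;
}
#endif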
2020: GC_bool GC_dirty_maintained = FALSE;
2021:
2022: # ifdef DEFAULT_VDB
2023:
2024: /* All of the following assume the allocation lock is held, and */
2025: /* signals are disabled. */
2026:
2027: /* The client asserts that unallocated pages in the heap are never */
2028: /* written. */
2029:
2030: /* Initialize virtual dirty bit implementation. */
2031: void GC_dirty_init()
2032: {
1.7 noro 2033: # ifdef PRINTSTATS
2034: GC_printf0("Initializing DEFAULT_VDB...\n");
2035: # endif
1.1 noro 2036: GC_dirty_maintained = TRUE;
2037: }
2038:
2039: /* Retrieve system dirty bits for heap to a local buffer. */
2040: /* Restore the system's notion of which pages are dirty.		*/
2041: void GC_read_dirty()
2042: {}
2043:
2044: /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
2045: /* If the actual page size is different, this returns TRUE if any */
2046: /* of the pages overlapping h are dirty. This routine may err on the */
2047: /* side of labelling pages as dirty (and this implementation does). */
2048: /*ARGSUSED*/
2049: GC_bool GC_page_was_dirty(h)
2050: struct hblk *h;
2051: {
2052: return(TRUE);
2053: }
2054:
2055: /*
2056: * The following two routines are typically less crucial. They matter
2057: * most with large dynamic libraries, or if we can't accurately identify
2058: * stacks, e.g. under Solaris 2.X. Otherwise the following default
2059: * versions are adequate.
2060: */
2061:
2062: /* Could any valid GC heap pointer ever have been written to this page? */
2063: /*ARGSUSED*/
2064: GC_bool GC_page_was_ever_dirty(h)
2065: struct hblk *h;
2066: {
2067: return(TRUE);
2068: }
2069:
2070: /* Reset the n pages starting at h to "was never dirty" status. */
2071: void GC_is_fresh(h, n)
2072: struct hblk *h;
2073: word n;
2074: {
2075: }
2076:
1.6 noro 2077: /* A call that: */
2078: /* I) hints that [h, h+nblocks) is about to be written. */
2079: /* II) guarantees that protection is removed. */
2080: /* (I) may speed up some dirty bit implementations. */
2081: /* (II) may be essential if we need to ensure that */
2082: /* pointer-free system call buffers in the heap are */
2083: /* not protected. */
1.1 noro 2084: /*ARGSUSED*/
1.6 noro 2085: void GC_remove_protection(h, nblocks, is_ptrfree)
1.1 noro 2086: struct hblk *h;
1.6 noro 2087: word nblocks;
2088: GC_bool is_ptrfree;
1.1 noro 2089: {
2090: }
2091:
2092: # endif /* DEFAULT_VDB */
2093:
2094:
2095: # ifdef MPROTECT_VDB
2096:
2097: /*
2098: * See DEFAULT_VDB for interface descriptions.
2099: */
2100:
2101: /*
2102: * This implementation maintains dirty bits itself by catching write
2103: * faults and keeping track of them. We assume nobody else catches
1.6 noro 2104: * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
2105: * This means that clients must ensure that system calls don't write
2106: * to the write-protected heap. Probably the best way to do this is to
2107: * ensure that system calls write at most to POINTERFREE objects in the
2108: * heap, and do even that only if we are on a platform on which those
2109: * are not protected. Another alternative is to wrap system calls
2110: * (see example for read below), but the current implementation holds
2111: * a lock across blocking calls, making it problematic for multithreaded
2112: * applications.
1.1 noro 2113: * We assume the page size is a multiple of HBLKSIZE.
1.6 noro 2114: * We prefer them to be the same. We avoid protecting POINTERFREE
2115: * objects only if they are the same.
1.1 noro 2116: */
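/* An illustrative client-side sketch (not part of the collector): one	  */
/* safe discipline under MPROTECT_VDB is to point system calls at a	  */
/* buffer outside the collected heap and copy afterwards, so the kernel  */
/* never writes to a protected page; the copy faults in user code, where */
/* the write fault handler works normally. gc_safe_read is hypothetical  */
/* client code, and its static buffer is not thread-safe.		  */
#if 0
#include <string.h>
#include <unistd.h>

/* Read into a static bounce buffer, then copy into the GC heap. */
ssize_t gc_safe_read(int fd, void *gc_buf, size_t nbyte)
{
    static char bounce[8192];
    ssize_t n;

    if (nbyte > sizeof(bounce)) nbyte = sizeof(bounce);
    n = read(fd, bounce, nbyte);	/* kernel writes outside the heap */
    if (n > 0) memcpy(gc_buf, bounce, (size_t)n);	/* fault taken here */
    return n;
}
#endif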
2117:
1.7 noro 2118: # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)
1.1 noro 2119:
2120: # include <sys/mman.h>
2121: # include <signal.h>
2122: # include <sys/syscall.h>
2123:
2124: # define PROTECT(addr, len) \
1.2 noro 2125: if (mprotect((caddr_t)(addr), (size_t)(len), \
1.1 noro 2126: PROT_READ | OPT_PROT_EXEC) < 0) { \
2127: ABORT("mprotect failed"); \
2128: }
2129: # define UNPROTECT(addr, len) \
1.2 noro 2130: if (mprotect((caddr_t)(addr), (size_t)(len), \
1.1 noro 2131: PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
2132: ABORT("un-mprotect failed"); \
2133: }
2134:
2135: # else
2136:
1.7 noro 2137: # ifdef DARWIN
2138: /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
2139: decrease the likelihood of some of the problems described below. */
2140: #include <mach/vm_map.h>
2141: extern mach_port_t GC_task_self;
2142: #define PROTECT(addr,len) \
2143: if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2144: FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
2145: ABORT("vm_portect failed"); \
2146: }
2147: #define UNPROTECT(addr,len) \
2148: if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2149: FALSE,VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) { \
2150: ABORT("vm_portect failed"); \
2151: }
2152: # else
2153:
1.4 noro 2154: # ifndef MSWINCE
2155: # include <signal.h>
2156: # endif
1.1 noro 2157:
2158: static DWORD protect_junk;
2159: # define PROTECT(addr, len) \
2160: if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
2161: &protect_junk)) { \
2162: DWORD last_error = GetLastError(); \
2163: GC_printf1("Last error code: %lx\n", last_error); \
2164: ABORT("VirtualProtect failed"); \
2165: }
2166: # define UNPROTECT(addr, len) \
2167: if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
2168: &protect_junk)) { \
2169: ABORT("un-VirtualProtect failed"); \
2170: }
1.7 noro 2171: # endif /* !DARWIN */
2172: # endif /* MSWIN32 || MSWINCE || DARWIN */
1.1 noro 2173:
2174: #if defined(SUNOS4) || defined(FREEBSD)
2175: typedef void (* SIG_PF)();
1.7 noro 2176: #endif /* SUNOS4 || FREEBSD */
2177:
1.6 noro 2178: #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
1.7 noro 2179: || defined(HURD)
1.2 noro 2180: # ifdef __STDC__
1.1 noro 2181: typedef void (* SIG_PF)(int);
1.2 noro 2182: # else
2183: typedef void (* SIG_PF)();
2184: # endif
1.7 noro 2185: #endif /* SUNOS5SIGS || OSF1 || LINUX || HURD */
2186:
1.1 noro 2187: #if defined(MSWIN32)
2188: typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
2189: # undef SIG_DFL
2190: # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
2191: #endif
1.4 noro 2192: #if defined(MSWINCE)
2193: typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
2194: # undef SIG_DFL
2195: # define SIG_DFL (SIG_PF) (-1)
2196: #endif
1.1 noro 2197:
1.6 noro 2198: #if defined(IRIX5) || defined(OSF1) || defined(HURD)
1.1 noro 2199: typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1.7 noro 2200: #endif /* IRIX5 || OSF1 || HURD */
2201:
1.1 noro 2202: #if defined(SUNOS5SIGS)
1.2 noro 2203: # ifdef HPUX
2204: # define SIGINFO __siginfo
2205: # else
2206: # define SIGINFO siginfo
2207: # endif
2208: # ifdef __STDC__
2209: typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
2210: # else
2211: typedef void (* REAL_SIG_PF)();
2212: # endif
1.7 noro 2213: #endif /* SUNOS5SIGS */
2214:
1.1 noro 2215: #if defined(LINUX)
1.6 noro 2216: # if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
1.1 noro 2217: typedef struct sigcontext s_c;
1.6 noro 2218: # else /* glibc < 2.2 */
2219: # include <linux/version.h>
2220: # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
2221: typedef struct sigcontext s_c;
2222: # else
2223: typedef struct sigcontext_struct s_c;
2224: # endif
2225: # endif /* glibc < 2.2 */
1.2 noro 2226: # if defined(ALPHA) || defined(M68K)
2227: typedef void (* REAL_SIG_PF)(int, int, s_c *);
2228: # else
1.4 noro 2229: # if defined(IA64) || defined(HP_PA)
1.2 noro 2230: typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
2231: # else
2232: typedef void (* REAL_SIG_PF)(int, s_c);
2233: # endif
2234: # endif
1.1 noro 2235: # ifdef ALPHA
2236: /* Retrieve fault address from sigcontext structure by decoding */
2237: /* instruction. */
2238: char * get_fault_addr(s_c *sc) {
2239: unsigned instr;
2240: word faultaddr;
2241:
2242: instr = *((unsigned *)(sc->sc_pc));
2243: faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
2244: faultaddr += (word) (((int)instr << 16) >> 16);
2245: return (char *)faultaddr;
2246: }
2247: # endif /* ALPHA */
1.7 noro 2248: # endif /* LINUX */
1.4 noro 2249:
1.7 noro 2250: #ifndef DARWIN
1.1 noro 2251: SIG_PF GC_old_bus_handler;
2252: SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
1.7 noro 2253: #endif /* !DARWIN */
1.1 noro 2254:
1.7 noro 2255: #if defined(THREADS)
1.4 noro 2256: /* We need to lock around the bitmap update in the write fault handler */
2257: /* in order to avoid the risk of losing a bit. We do this with a */
2258: /* test-and-set spin lock if we know how to do that. Otherwise we */
2259: /* check whether we are already in the handler and use the dumb but */
2260: /* safe fallback algorithm of setting all bits in the word. */
2261: /* Contention should be very rare, so we do the minimum to handle it */
2262: /* correctly. */
2263: #ifdef GC_TEST_AND_SET_DEFINED
2264: static VOLATILE unsigned int fault_handler_lock = 0;
2265: void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
1.6 noro 2266: while (GC_test_and_set(&fault_handler_lock)) {}
1.4 noro 2267: /* Could also revert to set_pht_entry_from_index_safe if initial */
2268: /* GC_test_and_set fails. */
2269: set_pht_entry_from_index(db, index);
2270: GC_clear(&fault_handler_lock);
2271: }
2272: #else /* !GC_TEST_AND_SET_DEFINED */
2273: /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2274: /* just before we notice the conflict and correct it. We may end up */
2275: /* looking at it while it's wrong. But this requires contention */
2276: /* exactly when a GC is triggered, which seems far less likely to */
2277: /* fail than the old code, which had no reported failures. Thus we */
2278: /* leave it this way while we think of something better, or support */
2279: /* GC_test_and_set on the remaining platforms. */
2280: static VOLATILE word currently_updating = 0;
2281: void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2282: unsigned int update_dummy;
2283: currently_updating = (word)(&update_dummy);
2284: set_pht_entry_from_index(db, index);
2285: /* If we get contention in the 10 or so instruction window here, */
2286: /* and we get stopped by a GC between the two updates, we lose! */
2287: if (currently_updating != (word)(&update_dummy)) {
2288: set_pht_entry_from_index_safe(db, index);
2289: /* We claim that if two threads concurrently try to update the */
2290: /* dirty bit vector, the first one to execute UPDATE_START */
2291: /* will see it changed when UPDATE_END is executed. (Note that */
2292: /* &update_dummy must differ in two distinct threads.) It */
2293: /* will then execute set_pht_entry_from_index_safe, thus */
2294: /* returning us to a safe state, though not soon enough. */
2295: }
2296: }
2297: #endif /* !GC_TEST_AND_SET_DEFINED */
2298: #else /* !THREADS */
2299: # define async_set_pht_entry_from_index(db, index) \
2300: set_pht_entry_from_index(db, index)
2301: #endif /* !THREADS */
2302:
1.1 noro 2303: /*ARGSUSED*/
1.7 noro 2304: #if !defined(DARWIN)
1.1 noro 2305: # if defined (SUNOS4) || defined(FREEBSD)
2306: void GC_write_fault_handler(sig, code, scp, addr)
2307: int sig, code;
2308: struct sigcontext *scp;
2309: char * addr;
2310: # ifdef SUNOS4
2311: # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2312: # define CODE_OK (FC_CODE(code) == FC_PROT \
2313: || (FC_CODE(code) == FC_OBJERR \
2314: && FC_ERRNO(code) == FC_PROT))
2315: # endif
2316: # ifdef FREEBSD
2317: # define SIG_OK (sig == SIGBUS)
2318: # define CODE_OK (code == BUS_PAGE_FAULT)
2319: # endif
1.7 noro 2320: # endif /* SUNOS4 || FREEBSD */
2321:
1.6 noro 2322: # if defined(IRIX5) || defined(OSF1) || defined(HURD)
1.1 noro 2323: # include <errno.h>
2324: void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2325: # ifdef OSF1
1.6 noro 2326: # define SIG_OK (sig == SIGSEGV)
1.1 noro 2327: # define CODE_OK (code == 2 /* experimentally determined */)
2328: # endif
2329: # ifdef IRIX5
1.6 noro 2330: # define SIG_OK (sig == SIGSEGV)
1.1 noro 2331: # define CODE_OK (code == EACCES)
2332: # endif
1.6 noro 2333: # ifdef HURD
2334: # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
2335: # define CODE_OK TRUE
2336: # endif
1.7 noro 2337: # endif /* IRIX5 || OSF1 || HURD */
2338:
1.1 noro 2339: # if defined(LINUX)
1.2 noro 2340: # if defined(ALPHA) || defined(M68K)
1.1 noro 2341: void GC_write_fault_handler(int sig, int code, s_c * sc)
2342: # else
1.4 noro 2343: # if defined(IA64) || defined(HP_PA)
1.2 noro 2344: void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
2345: # else
1.6 noro 2346: # if defined(ARM32)
2347: void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
2348: # else
2349: void GC_write_fault_handler(int sig, s_c sc)
2350: # endif
1.2 noro 2351: # endif
1.1 noro 2352: # endif
2353: # define SIG_OK (sig == SIGSEGV)
2354: # define CODE_OK TRUE
1.2 noro 2355:     /* Empirically c.trapno == 14 on IA32, but is that useful?     */
2356: /* Should probably consider alignment issues on other */
2357: /* architectures. */
1.7 noro 2358: # endif /* LINUX */
2359:
1.1 noro 2360: # if defined(SUNOS5SIGS)
1.2 noro 2361: # ifdef __STDC__
2362: void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
2363: # else
2364: void GC_write_fault_handler(sig, scp, context)
2365: int sig;
2366: struct SIGINFO *scp;
2367: void * context;
2368: # endif
2369: # ifdef HPUX
2370: # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2371: # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2372: || (scp -> si_code == BUS_ADRERR) \
2373: || (scp -> si_code == BUS_UNKNOWN) \
2374: || (scp -> si_code == SEGV_UNKNOWN) \
2375: || (scp -> si_code == BUS_OBJERR)
2376: # else
2377: # define SIG_OK (sig == SIGSEGV)
2378: # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2379: # endif
1.7 noro 2380: # endif /* SUNOS5SIGS */
1.4 noro 2381:
2382: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 2383: LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
2384: # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
1.4 noro 2385: STATUS_ACCESS_VIOLATION)
1.1 noro 2386: # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
2387: /* Write fault */
1.7 noro 2388: # endif /* MSWIN32 || MSWINCE */
1.1 noro 2389: {
2390: register unsigned i;
1.6 noro 2391: # if defined(HURD)
2392: char *addr = (char *) code;
2393: # endif
1.1 noro 2394: # ifdef IRIX5
2395: char * addr = (char *) (size_t) (scp -> sc_badvaddr);
2396: # endif
2397: # if defined(OSF1) && defined(ALPHA)
2398: char * addr = (char *) (scp -> sc_traparg_a0);
2399: # endif
2400: # ifdef SUNOS5SIGS
2401: char * addr = (char *) (scp -> si_addr);
2402: # endif
2403: # ifdef LINUX
1.7 noro 2404: # if defined(I386) || defined (X86_64)
1.1 noro 2405: char * addr = (char *) (sc.cr2);
2406: # else
2407: # if defined(M68K)
2408: char * addr = NULL;
2409:
1.4 noro 2410: struct sigcontext *scp = (struct sigcontext *)(sc);
1.1 noro 2411:
2412: int format = (scp->sc_formatvec >> 12) & 0xf;
2413: unsigned long *framedata = (unsigned long *)(scp + 1);
2414: unsigned long ea;
2415:
2416: if (format == 0xa || format == 0xb) {
2417: /* 68020/030 */
2418: ea = framedata[2];
2419: } else if (format == 7) {
2420: /* 68040 */
2421: ea = framedata[3];
1.4 noro 2422: if (framedata[1] & 0x08000000) {
2423: /* correct addr on misaligned access */
2424: ea = (ea+4095)&(~4095);
2425: }
1.1 noro 2426: } else if (format == 4) {
2427: /* 68060 */
2428: ea = framedata[0];
2429: if (framedata[1] & 0x08000000) {
2430: /* correct addr on misaligned access */
2431: ea = (ea+4095)&(~4095);
2432: }
2433: }
2434: addr = (char *)ea;
2435: # else
2436: # ifdef ALPHA
2437: char * addr = get_fault_addr(sc);
2438: # else
1.4 noro 2439: # if defined(IA64) || defined(HP_PA)
1.2 noro 2440: char * addr = si -> si_addr;
1.3 noro 2441: /* I believe this is claimed to work on all platforms for */
2442: /* Linux 2.3.47 and later. Hopefully we don't have to */
2443: /* worry about earlier kernels on IA64. */
1.2 noro 2444: # else
2445: # if defined(POWERPC)
2446: char * addr = (char *) (sc.regs->dar);
2447: # else
1.6 noro 2448: # if defined(ARM32)
2449: char * addr = (char *)sc.fault_address;
2450: # else
2451: --> architecture not supported
2452: # endif
1.2 noro 2453: # endif
2454: # endif
1.1 noro 2455: # endif
2456: # endif
2457: # endif
2458: # endif
1.4 noro 2459: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 2460: char * addr = (char *) (exc_info -> ExceptionRecord
2461: -> ExceptionInformation[1]);
2462: # define sig SIGSEGV
2463: # endif
2464:
2465: if (SIG_OK && CODE_OK) {
2466: register struct hblk * h =
2467: (struct hblk *)((word)addr & ~(GC_page_size-1));
2468: GC_bool in_allocd_block;
2469:
2470: # ifdef SUNOS5SIGS
2471: /* Address is only within the correct physical page. */
2472: in_allocd_block = FALSE;
2473: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2474: if (HDR(h+i) != 0) {
2475: in_allocd_block = TRUE;
2476: }
2477: }
2478: # else
2479: in_allocd_block = (HDR(addr) != 0);
2480: # endif
2481: if (!in_allocd_block) {
2482: /* Heap blocks now begin and end on page boundaries */
2483: SIG_PF old_handler;
2484:
2485: if (sig == SIGSEGV) {
2486: old_handler = GC_old_segv_handler;
2487: } else {
2488: old_handler = GC_old_bus_handler;
2489: }
2490: if (old_handler == SIG_DFL) {
1.4 noro 2491: # if !defined(MSWIN32) && !defined(MSWINCE)
1.1 noro 2492: GC_err_printf1("Segfault at 0x%lx\n", addr);
2493: ABORT("Unexpected bus error or segmentation fault");
2494: # else
2495: return(EXCEPTION_CONTINUE_SEARCH);
2496: # endif
2497: } else {
2498: # if defined (SUNOS4) || defined(FREEBSD)
2499: (*old_handler) (sig, code, scp, addr);
2500: return;
2501: # endif
2502: # if defined (SUNOS5SIGS)
2503: (*(REAL_SIG_PF)old_handler) (sig, scp, context);
2504: return;
2505: # endif
2506: # if defined (LINUX)
1.2 noro 2507: # if defined(ALPHA) || defined(M68K)
1.1 noro 2508: (*(REAL_SIG_PF)old_handler) (sig, code, sc);
2509: # else
1.4 noro 2510: # if defined(IA64) || defined(HP_PA)
1.2 noro 2511: (*(REAL_SIG_PF)old_handler) (sig, si, scp);
2512: # else
1.1 noro 2513: (*(REAL_SIG_PF)old_handler) (sig, sc);
1.2 noro 2514: # endif
1.1 noro 2515: # endif
2516: return;
2517: # endif
1.6 noro 2518: # if defined (IRIX5) || defined(OSF1) || defined(HURD)
1.1 noro 2519: (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2520: return;
2521: # endif
2522: # ifdef MSWIN32
2523: return((*old_handler)(exc_info));
2524: # endif
2525: }
2526: }
1.6 noro 2527: UNPROTECT(h, GC_page_size);
2528: /* We need to make sure that no collection occurs between */
2529: /* the UNPROTECT and the setting of the dirty bit. Otherwise */
2530: /* a write by a third thread might go unnoticed. Reversing */
2531: /* the order is just as bad, since we would end up unprotecting */
2532: /* a page in a GC cycle during which it's not marked. */
2533: /* Currently we do this by disabling the thread stopping */
2534: /* signals while this handler is running. An alternative might */
2535: /* be to record the fact that we're about to unprotect, or */
2536: /* have just unprotected a page in the GC's thread structure, */
2537: /* and then to have the thread stopping code set the dirty */
2538: /* flag, if necessary. */
1.1 noro 2539: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2540: register int index = PHT_HASH(h+i);
2541:
1.4 noro 2542: async_set_pht_entry_from_index(GC_dirty_pages, index);
1.1 noro 2543: }
1.6 noro 2544: # if defined(OSF1)
1.1 noro 2545: /* These reset the signal handler each time by default. */
2546: signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
2547: # endif
2548: /* The write may not take place before dirty bits are read. */
2549: /* But then we'll fault again ... */
1.4 noro 2550: # if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 2551: return(EXCEPTION_CONTINUE_EXECUTION);
2552: # else
2553: return;
2554: # endif
2555: }
1.4 noro 2556: #if defined(MSWIN32) || defined(MSWINCE)
1.1 noro 2557: return EXCEPTION_CONTINUE_SEARCH;
2558: #else
2559: GC_err_printf1("Segfault at 0x%lx\n", addr);
2560: ABORT("Unexpected bus error or segmentation fault");
2561: #endif
2562: }
1.7 noro 2563: #endif /* !DARWIN */
1.1 noro 2564:
2565: /*
2566: * We hold the allocation lock. We expect block h to be written
1.6 noro 2567: * shortly. Ensure that all pages containing any part of the n hblks
2568: * starting at h are no longer protected. If is_ptrfree is false,
2569: * also ensure that they will subsequently appear to be dirty.
1.1 noro 2570: */
1.6 noro 2571: void GC_remove_protection(h, nblocks, is_ptrfree)
1.1 noro 2572: struct hblk *h;
1.6 noro 2573: word nblocks;
2574: GC_bool is_ptrfree;
1.1 noro 2575: {
1.6 noro 2576: struct hblk * h_trunc; /* Truncated to page boundary */
2577: struct hblk * h_end; /* Page boundary following block end */
2578: struct hblk * current;
2579: GC_bool found_clean;
1.1 noro 2580:
2581: if (!GC_dirty_maintained) return;
2582: h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
1.6 noro 2583: h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
2584: & ~(GC_page_size-1));
1.1 noro 2585: found_clean = FALSE;
1.6 noro 2586: for (current = h_trunc; current < h_end; ++current) {
2587: int index = PHT_HASH(current);
1.1 noro 2588:
1.6 noro 2589: if (!is_ptrfree || current < h || current >= h + nblocks) {
1.4 noro 2590: async_set_pht_entry_from_index(GC_dirty_pages, index);
1.1 noro 2591: }
2592: }
1.6 noro 2593: UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
1.1 noro 2594: }
2595:
1.7 noro 2596: #if !defined(DARWIN)
1.1 noro 2597: void GC_dirty_init()
2598: {
1.6 noro 2599: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
2600: defined(OSF1) || defined(HURD)
1.4 noro 2601: struct sigaction act, oldact;
1.6 noro 2602: /* We should probably specify SA_SIGINFO for Linux, and handle */
2603: /* the different architectures more uniformly. */
2604: # if defined(IRIX5) || defined(LINUX) || defined(OSF1) || defined(HURD)
1.1 noro 2605: act.sa_flags = SA_RESTART;
1.6 noro 2606: act.sa_handler = (SIG_PF)GC_write_fault_handler;
1.4 noro 2607: # else
1.1 noro 2608: act.sa_flags = SA_RESTART | SA_SIGINFO;
2609: act.sa_sigaction = GC_write_fault_handler;
1.4 noro 2610: # endif
2611: (void)sigemptyset(&act.sa_mask);
1.6 noro 2612: # ifdef SIG_SUSPEND
2613: /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
2614: /* handler. This effectively makes the handler atomic w.r.t. */
2615: /* stopping the world for GC. */
2616: (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
2617: # endif /* SIG_SUSPEND */
1.4 noro 2618: # endif
1.1 noro 2619: # ifdef PRINTSTATS
2620: GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2621: # endif
2622: GC_dirty_maintained = TRUE;
2623: if (GC_page_size % HBLKSIZE != 0) {
2624: GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2625: ABORT("Page size not multiple of HBLKSIZE");
2626: }
2627: # if defined(SUNOS4) || defined(FREEBSD)
2628: GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2629: if (GC_old_bus_handler == SIG_IGN) {
2630: GC_err_printf0("Previously ignored bus error!?");
2631: GC_old_bus_handler = SIG_DFL;
2632: }
2633: if (GC_old_bus_handler != SIG_DFL) {
2634: # ifdef PRINTSTATS
2635: GC_err_printf0("Replaced other SIGBUS handler\n");
2636: # endif
2637: }
2638: # endif
1.6 noro 2639: # if defined(SUNOS4)
1.1 noro 2640: GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2641: if (GC_old_segv_handler == SIG_IGN) {
2642: GC_err_printf0("Previously ignored segmentation violation!?");
2643: GC_old_segv_handler = SIG_DFL;
2644: }
2645: if (GC_old_segv_handler != SIG_DFL) {
2646: # ifdef PRINTSTATS
2647: GC_err_printf0("Replaced other SIGSEGV handler\n");
2648: # endif
2649: }
2650: # endif
1.6 noro 2651: # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) \
2652: || defined(OSF1) || defined(HURD)
2653: /* SUNOS5SIGS includes HPUX */
2654: # if defined(GC_IRIX_THREADS)
1.1 noro 2655: sigaction(SIGSEGV, 0, &oldact);
2656: sigaction(SIGSEGV, &act, 0);
2657: # else
1.7 noro 2658: {
2659: int res = sigaction(SIGSEGV, &act, &oldact);
2660: if (res != 0) ABORT("Sigaction failed");
2661: }
1.1 noro 2662: # endif
1.6 noro 2663: # if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
1.1 noro 2664: /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2665: /* sa_sigaction. */
2666: GC_old_segv_handler = oldact.sa_handler;
1.6 noro 2667: # else /* Irix 6.x or SUNOS5SIGS or LINUX */
1.1 noro 2668: if (oldact.sa_flags & SA_SIGINFO) {
2669: GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2670: } else {
2671: GC_old_segv_handler = oldact.sa_handler;
2672: }
2673: # endif
2674: if (GC_old_segv_handler == SIG_IGN) {
2675: GC_err_printf0("Previously ignored segmentation violation!?");
2676: GC_old_segv_handler = SIG_DFL;
2677: }
2678: if (GC_old_segv_handler != SIG_DFL) {
2679: # ifdef PRINTSTATS
2680: GC_err_printf0("Replaced other SIGSEGV handler\n");
2681: # endif
2682: }
1.4 noro 2683: # endif
1.7 noro 2684: # if defined(HPUX) || defined(LINUX) || defined(HURD)
1.4 noro 2685: sigaction(SIGBUS, &act, &oldact);
2686: GC_old_bus_handler = oldact.sa_handler;
2687: if (GC_old_bus_handler == SIG_IGN) {
2688: GC_err_printf0("Previously ignored bus error!?");
2689: GC_old_bus_handler = SIG_DFL;
2690: }
2691: if (GC_old_bus_handler != SIG_DFL) {
2692: # ifdef PRINTSTATS
2693: GC_err_printf0("Replaced other SIGBUS handler\n");
2694: # endif
2695: }
1.7 noro 2696: # endif /* HPUX || LINUX || HURD */
1.1 noro 2697: # if defined(MSWIN32)
2698: GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2699: if (GC_old_segv_handler != NULL) {
2700: # ifdef PRINTSTATS
2701: GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2702: # endif
2703: } else {
2704: GC_old_segv_handler = SIG_DFL;
2705: }
2706: # endif
2707: }
1.7 noro 2708: #endif /* !DARWIN */
1.1 noro 2709:
1.6 noro 2710: int GC_incremental_protection_needs()
2711: {
2712: if (GC_page_size == HBLKSIZE) {
2713: return GC_PROTECTS_POINTER_HEAP;
2714: } else {
2715: return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
2716: }
2717: }
2718:
2719: #define HAVE_INCREMENTAL_PROTECTION_NEEDS
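/* An illustrative client-side sketch (not part of the collector): the	  */
/* call above lets a client pick a safe target for system call output.	  */
/* GC_incremental_protection_needs(), GC_PROTECTS_PTRFREE_HEAP and	  */
/* GC_MALLOC_ATOMIC() are gc.h interfaces; alloc_syscall_buffer is	  */
/* hypothetical client code.						  */
#if 0
#include <stdlib.h>
#include "gc.h"

void * alloc_syscall_buffer(size_t len)
{
    /* Pointer-free objects are a safe target for system calls only */
    /* if the collector leaves the pointer-free heap unprotected.   */
    if (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP)
	return malloc(len);		/* fall back to uncollected memory */
    return GC_MALLOC_ATOMIC(len);	/* pointer-free, left unprotected */
}
#endif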
1.1 noro 2720:
1.6 noro 2721: #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
1.1 noro 2722:
1.6 noro 2723: #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
1.1 noro 2724: void GC_protect_heap()
2725: {
2726: ptr_t start;
2727: word len;
1.6 noro 2728: struct hblk * current;
2729: struct hblk * current_start; /* Start of block to be protected. */
2730: struct hblk * limit;
1.1 noro 2731: unsigned i;
1.6 noro 2732: GC_bool protect_all =
2733: (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
1.1 noro 2734: for (i = 0; i < GC_n_heap_sects; i++) {
2735: start = GC_heap_sects[i].hs_start;
2736: len = GC_heap_sects[i].hs_bytes;
1.6 noro 2737: if (protect_all) {
2738: PROTECT(start, len);
2739: } else {
2740:       GC_ASSERT(PAGE_ALIGNED(len));
2741:       GC_ASSERT(PAGE_ALIGNED(start));
2742: current_start = current = (struct hblk *)start;
2743: limit = (struct hblk *)(start + len);
2744: while (current < limit) {
2745: hdr * hhdr;
2746: word nhblks;
2747: GC_bool is_ptrfree;
2748:
2749: GC_ASSERT(PAGE_ALIGNED(current));
2750: GET_HDR(current, hhdr);
2751: if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
2752: /* This can happen only if we're at the beginning of a */
2753: /* heap segment, and a block spans heap segments. */
2754: /* We will handle that block as part of the preceding */
2755: /* segment. */
2756: GC_ASSERT(current_start == current);
2757: current_start = ++current;
2758: continue;
2759: }
2760: if (HBLK_IS_FREE(hhdr)) {
2761: GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
2762: nhblks = divHBLKSZ(hhdr -> hb_sz);
2763: is_ptrfree = TRUE; /* dirty on alloc */
2764: } else {
2765: nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
2766: is_ptrfree = IS_PTRFREE(hhdr);
2767: }
2768: if (is_ptrfree) {
2769: if (current_start < current) {
2770: PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
2771: }
2772: current_start = (current += nhblks);
2773: } else {
2774: current += nhblks;
2775: }
2776: }
2777: if (current_start < current) {
2778: PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
2779: }
2780: }
1.1 noro 2781: }
2782: }
2783:
2784: /* We assume that either the world is stopped or it's OK to lose dirty	*/
2785: /* bits while this is happening (as in GC_enable_incremental).		*/
2786: void GC_read_dirty()
2787: {
2788: BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2789: (sizeof GC_dirty_pages));
2790: BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2791: GC_protect_heap();
2792: }
2793:
2794: GC_bool GC_page_was_dirty(h)
2795: struct hblk * h;
2796: {
2797: register word index = PHT_HASH(h);
2798:
2799: return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2800: }
2801:
2802: /*
2803: * Acquiring the allocation lock here is dangerous, since this
2804: * can be called from within GC_call_with_alloc_lock, and the cord
2805: * package does so. On systems that allow nested lock acquisition, this
2806: * happens to work.
2807: * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2808: */
1.4 noro 2809:
2810: static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
1.1 noro 2811:
2812: void GC_begin_syscall()
2813: {
1.4 noro 2814: if (!I_HOLD_LOCK()) {
2815: LOCK();
2816: syscall_acquired_lock = TRUE;
2817: }
1.1 noro 2818: }
2819:
2820: void GC_end_syscall()
2821: {
1.4 noro 2822: if (syscall_acquired_lock) {
2823: syscall_acquired_lock = FALSE;
2824: UNLOCK();
2825: }
1.1 noro 2826: }
2827:
2828: void GC_unprotect_range(addr, len)
2829: ptr_t addr;
2830: word len;
2831: {
2832: struct hblk * start_block;
2833: struct hblk * end_block;
2834: register struct hblk *h;
2835: ptr_t obj_start;
2836:
1.6 noro 2837: if (!GC_dirty_maintained) return;
1.1 noro 2838: obj_start = GC_base(addr);
2839: if (obj_start == 0) return;
2840: if (GC_base(addr + len - 1) != obj_start) {
2841: ABORT("GC_unprotect_range(range bigger than object)");
2842: }
2843: start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2844: end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
2845: end_block += GC_page_size/HBLKSIZE - 1;
2846: for (h = start_block; h <= end_block; h++) {
2847: register word index = PHT_HASH(h);
2848:
1.4 noro 2849: async_set_pht_entry_from_index(GC_dirty_pages, index);
1.1 noro 2850: }
2851: UNPROTECT(start_block,
2852: ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2853: }
2854:
1.6 noro 2855: #if 0
2856:
2857: /* We no longer wrap read by default, since that was causing too many */
2858: /* problems. It is preferred that the client instead avoids writing */
2859: /* to the write-protected heap with a system call. */
2860: /* This still serves as sample code if you do want to wrap system calls.*/
2861:
2862: #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
2863: /* Replacement for UNIX system call. */
2864: /* Other calls that write to the heap should be handled similarly. */
2865: /* Note that this doesn't work well for blocking reads: It will hold */
2866: /* the allocation lock for the entire duration of the call. Multithreaded */
2867: /* clients should really ensure that it won't block, either by setting */
2868: /* the descriptor nonblocking, or by calling select or poll first, to */
2869: /* make sure that input is available. */
2870: /* Another, preferred alternative is to ensure that system calls never */
2871: /* write to the protected heap (see above). */
1.1 noro 2872: # if defined(__STDC__) && !defined(SUNOS4)
2873: # include <unistd.h>
1.3 noro 2874: # include <sys/uio.h>
1.1 noro 2875: ssize_t read(int fd, void *buf, size_t nbyte)
2876: # else
2877: # ifndef LINT
2878: int read(fd, buf, nbyte)
2879: # else
2880: int GC_read(fd, buf, nbyte)
2881: # endif
2882: int fd;
2883: char *buf;
2884: int nbyte;
2885: # endif
2886: {
2887: int result;
2888:
2889: GC_begin_syscall();
2890: GC_unprotect_range(buf, (word)nbyte);
1.6 noro 2891: # if defined(IRIX5) || defined(GC_LINUX_THREADS)
1.1 noro 2892: /* Indirect system call may not always be easily available. */
2893: /* We could call _read, but that would interfere with the */
2894: /* libpthread interception of read. */
1.3 noro 2895: /* On Linux, we have to be careful with the linuxthreads */
2896: /* read interception. */
1.1 noro 2897: {
2898: struct iovec iov;
2899:
2900: iov.iov_base = buf;
2901: iov.iov_len = nbyte;
2902: result = readv(fd, &iov, 1);
2903: }
2904: # else
1.6 noro 2905: # if defined(HURD)
2906: result = __read(fd, buf, nbyte);
2907: # else
1.4 noro 2908: /* The two zero args at the end of this list are because one
2909: IA-64 syscall() implementation actually requires six args
2910: to be passed, even though they aren't always used. */
2911: result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
1.6 noro 2912: # endif /* !HURD */
1.1 noro 2913: # endif
2914: GC_end_syscall();
2915: return(result);
2916: }
1.6 noro 2917: #endif /* !MSWIN32 && !MSWINCE && !GC_USE_LD_WRAP */
1.3 noro 2918:
1.6 noro 2919: #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
1.3 noro 2920: /* We use the GNU ld call wrapping facility. */
2921: /* This requires that the linker be invoked with "--wrap read". */
2922: /* This can be done by passing -Wl,"--wrap read" to gcc. */
2923: /* I'm not sure that this actually wraps whatever version of read */
2924: /* is called by stdio. That code also mentions __read. */
2925: # include <unistd.h>
  extern ssize_t __real_read(int, void *, size_t);	/* resolved by ld --wrap */
2926: ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2927: {
2928: int result;
2929:
2930: GC_begin_syscall();
2931: GC_unprotect_range(buf, (word)nbyte);
2932: result = __real_read(fd, buf, nbyte);
2933: GC_end_syscall();
2934: return(result);
2935: }
2936:
2937: /* We should probably also do this for __read, or whatever stdio */
2938: /* actually calls. */
2939: #endif
1.1 noro 2940:
1.6 noro 2941: #endif /* 0 */
2942:
1.1 noro 2943: /*ARGSUSED*/
2944: GC_bool GC_page_was_ever_dirty(h)
2945: struct hblk *h;
2946: {
2947: return(TRUE);
2948: }
2949:
2950: /* Reset the n pages starting at h to "was never dirty" status. */
2951: /*ARGSUSED*/
2952: void GC_is_fresh(h, n)
2953: struct hblk *h;
2954: word n;
2955: {
2956: }
2957:
2958: # endif /* MPROTECT_VDB */
2959:
2960: # ifdef PROC_VDB
2961:
2962: /*
2963: * See DEFAULT_VDB for interface descriptions.
2964: */
2965:
2966: /*
2967:  * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
2968: * from which we can read page modified bits. This facility is far from
2969: * optimal (e.g. we would like to get the info for only some of the
2970: * address space), but it avoids intercepting system calls.
2971: */
2972:
2973: #include <errno.h>
2974: #include <sys/types.h>
2975: #include <sys/signal.h>
2976: #include <sys/fault.h>
2977: #include <sys/syscall.h>
2978: #include <sys/procfs.h>
2979: #include <sys/stat.h>
2980:
2981: #define INITIAL_BUF_SZ 4096
2982: word GC_proc_buf_size = INITIAL_BUF_SZ;
2983: char *GC_proc_buf;
2984:
1.6 noro 2985: #ifdef GC_SOLARIS_THREADS
1.1 noro 2986: /* We don't have exact sp values for threads. So we count on */
2987: /* occasionally declaring stack pages to be fresh. Thus we */
2988: /* need a real implementation of GC_is_fresh. We can't clear */
2989: /* entries in GC_written_pages, since that would declare all */
2990: /* pages with the given hash address to be fresh. */
2991: # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2992: struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
2993: /* Collisions are dropped. */
2994:
2995: # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2996: # define ADD_FRESH_PAGE(h) \
2997: GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2998: # define PAGE_IS_FRESH(h) \
2999: (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
3000: #endif
3001:
3002: /* Add all pages in pht2 to pht1 */
3003: void GC_or_pages(pht1, pht2)
3004: page_hash_table pht1, pht2;
3005: {
3006: register int i;
3007:
3008: for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
3009: }
3010:
3011: int GC_proc_fd;
3012:
3013: void GC_dirty_init()
3014: {
3015: int fd;
3016: char buf[30];
3017:
3018: GC_dirty_maintained = TRUE;
3019: if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
3020: register int i;
3021:
3022: for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
3023: # ifdef PRINTSTATS
3024: GC_printf1("Allocated words:%lu:all pages may have been written\n",
3025: (unsigned long)
3026: (GC_words_allocd + GC_words_allocd_before_gc));
3027: # endif
3028: }
3029: sprintf(buf, "/proc/%d", getpid());
3030: fd = open(buf, O_RDONLY);
3031: if (fd < 0) {
3032: ABORT("/proc open failed");
3033: }
3034: GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
3035: close(fd);
1.7 noro 3036: syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
1.1 noro 3037: if (GC_proc_fd < 0) {
3038: ABORT("/proc ioctl failed");
3039: }
3040: GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
1.6 noro 3041: # ifdef GC_SOLARIS_THREADS
1.1 noro 3042: GC_fresh_pages = (struct hblk **)
3043: GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
3044: if (GC_fresh_pages == 0) {
3045: GC_err_printf0("No space for fresh pages\n");
3046: EXIT();
3047: }
3048: BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
3049: # endif
3050: }
3051:
3052: /* Ignore write hints. They don't help us here. */
3053: /*ARGSUSED*/
1.6 noro 3054: void GC_remove_protection(h, nblocks, is_ptrfree)
1.1 noro 3055: struct hblk *h;
1.6 noro 3056: word nblocks;
3057: GC_bool is_ptrfree;
1.1 noro 3058: {
3059: }
3060:
1.6 noro 3061: #ifdef GC_SOLARIS_THREADS
1.1 noro 3062: # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
3063: #else
3064: # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
3065: #endif
3066:
3067: void GC_read_dirty()
3068: {
3069: unsigned long ps, np;
3070: int nmaps;
3071: ptr_t vaddr;
3072: struct prasmap * map;
3073: char * bufp;
3074: ptr_t current_addr, limit;
3075: int i;
3076: int dummy;
3077:
3078: BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
3079:
3080: bufp = GC_proc_buf;
3081: if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3082: # ifdef PRINTSTATS
3083: GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
3084: GC_proc_buf_size);
3085: # endif
3086: {
3087: /* Retry with larger buffer. */
3088: word new_size = 2 * GC_proc_buf_size;
3089: char * new_buf = GC_scratch_alloc(new_size);
3090:
3091: if (new_buf != 0) {
3092: GC_proc_buf = bufp = new_buf;
3093: GC_proc_buf_size = new_size;
3094: }
3095: if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3096: WARN("Insufficient space for /proc read\n", 0);
3097: /* Punt: */
3098: memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
3099: memset(GC_written_pages, 0xff, sizeof(page_hash_table));
1.6 noro 3100: # ifdef GC_SOLARIS_THREADS
1.1 noro 3101: BZERO(GC_fresh_pages,
3102: MAX_FRESH_PAGES * sizeof (struct hblk *));
3103: # endif
3104: return;
3105: }
3106: }
3107: }
3108: /* Copy dirty bits into GC_grungy_pages */
3109: nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
3110: /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
3111: nmaps, PG_REFERENCED, PG_MODIFIED); */
3112: bufp = bufp + sizeof(struct prpageheader);
3113: for (i = 0; i < nmaps; i++) {
3114: map = (struct prasmap *)bufp;
3115: vaddr = (ptr_t)(map -> pr_vaddr);
3116: ps = map -> pr_pagesize;
3117: np = map -> pr_npage;
3118: /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
3119: limit = vaddr + ps * np;
3120: bufp += sizeof (struct prasmap);
3121: for (current_addr = vaddr;
3122: current_addr < limit; current_addr += ps){
3123: if ((*bufp++) & PG_MODIFIED) {
3124: register struct hblk * h = (struct hblk *) current_addr;
3125:
3126: while ((ptr_t)h < current_addr + ps) {
3127: register word index = PHT_HASH(h);
3128:
3129: set_pht_entry_from_index(GC_grungy_pages, index);
1.6 noro 3130: # ifdef GC_SOLARIS_THREADS
1.1 noro 3131: {
3132: register int slot = FRESH_PAGE_SLOT(h);
3133:
3134: if (GC_fresh_pages[slot] == h) {
3135: GC_fresh_pages[slot] = 0;
3136: }
3137: }
3138: # endif
3139: h++;
3140: }
3141: }
3142: }
3143: bufp += sizeof(long) - 1;
3144: bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
3145: }
3146: /* Update GC_written_pages. */
3147: GC_or_pages(GC_written_pages, GC_grungy_pages);
1.6 noro 3148: # ifdef GC_SOLARIS_THREADS
1.1 noro 3149: /* Make sure that old stacks are considered completely clean */
3150: /* unless written again. */
3151: GC_old_stacks_are_fresh();
3152: # endif
3153: }
3154:
3155: #undef READ
3156:
3157: GC_bool GC_page_was_dirty(h)
3158: struct hblk *h;
3159: {
3160: register word index = PHT_HASH(h);
3161: register GC_bool result;
3162:
3163: result = get_pht_entry_from_index(GC_grungy_pages, index);
1.6 noro 3164: # ifdef GC_SOLARIS_THREADS
1.1 noro 3165: if (result && PAGE_IS_FRESH(h)) result = FALSE;
3166: /* This happens only if page was declared fresh since */
3167: /* the read_dirty call, e.g. because it's in an unused */
3168: /* thread stack. It's OK to treat it as clean, in */
3169: /* that case. And it's consistent with */
3170: /* GC_page_was_ever_dirty. */
3171: # endif
3172: return(result);
3173: }
3174:
3175: GC_bool GC_page_was_ever_dirty(h)
3176: struct hblk *h;
3177: {
3178: register word index = PHT_HASH(h);
3179: register GC_bool result;
3180:
3181: result = get_pht_entry_from_index(GC_written_pages, index);
1.6 noro 3182: # ifdef GC_SOLARIS_THREADS
1.1 noro 3183: if (result && PAGE_IS_FRESH(h)) result = FALSE;
3184: # endif
3185: return(result);
3186: }
3187:
3188: /* Caller holds allocation lock. */
3189: void GC_is_fresh(h, n)
3190: struct hblk *h;
3191: word n;
3192: {
3193:
3194: register word index;
3195:
1.6 noro 3196: # ifdef GC_SOLARIS_THREADS
1.1 noro 3197: register word i;
3198:
3199: if (GC_fresh_pages != 0) {
3200: for (i = 0; i < n; i++) {
3201: ADD_FRESH_PAGE(h + i);
3202: }
3203: }
3204: # endif
3205: }
3206:
3207: # endif /* PROC_VDB */
3208:
3209:
3210: # ifdef PCR_VDB
3211:
3212: # include "vd/PCR_VD.h"
3213:
3214: # define NPAGES (32*1024) /* 128 MB */
3215:
3216: PCR_VD_DB GC_grungy_bits[NPAGES];
3217:
3218: ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
3219: /* HBLKSIZE aligned. */
3220:
3221: void GC_dirty_init()
3222: {
3223: GC_dirty_maintained = TRUE;
3224: /* For the time being, we assume the heap generally grows up */
3225: GC_vd_base = GC_heap_sects[0].hs_start;
3226: if (GC_vd_base == 0) {
3227: ABORT("Bad initial heap segment");
3228: }
3229: if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
3230: != PCR_ERes_okay) {
3231: ABORT("dirty bit initialization failed");
3232: }
3233: }
3234:
3235: void GC_read_dirty()
3236: {
3237: /* lazily enable dirty bits on newly added heap sects */
3238: {
3239: static int onhs = 0;
3240: int nhs = GC_n_heap_sects;
3241: for( ; onhs < nhs; onhs++ ) {
3242: PCR_VD_WriteProtectEnable(
3243: GC_heap_sects[onhs].hs_start,
3244: GC_heap_sects[onhs].hs_bytes );
3245: }
3246: }
3247:
3248:
3249: if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
3250: != PCR_ERes_okay) {
3251: ABORT("dirty bit read failed");
3252: }
3253: }
3254:
3255: GC_bool GC_page_was_dirty(h)
3256: struct hblk *h;
3257: {
3258: if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
3259: return(TRUE);
3260: }
3261: return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
3262: }
3263:
3264: /*ARGSUSED*/
1.6 noro 3265: void GC_remove_protection(h, nblocks, is_ptrfree)
1.1 noro 3266: struct hblk *h;
1.6 noro 3267: word nblocks;
3268: GC_bool is_ptrfree;
1.1 noro 3269: {
1.6 noro 3270: PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
3271: PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
1.1 noro 3272: }
3273:
3274: # endif /* PCR_VDB */
3275:
1.7 noro 3276: #if defined(MPROTECT_VDB) && defined(DARWIN)
3277: /* The following sources were used as a *reference* for this exception handling
3278: code:
3279: 1. Apple's mach/xnu documentation
3280: 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
3281: omnigroup's macosx-dev list.
3282: www.omnigroup.com/mailman/archive/macosx-dev/2000-June/002030.html
3283: 3. macosx-nat.c from Apple's GDB source code.
3284: */
3285:
3286: /* There seem to be numerous problems with Darwin's mach exception handling.
3287: I'm pretty sure they are not problems in my code. Search for
3288: BROKEN_EXCEPTION_HANDLING for more information. */
3289: #define BROKEN_EXCEPTION_HANDLING
3290:
3291: #include <mach/mach.h>
3292: #include <mach/mach_error.h>
3293: #include <mach/thread_status.h>
3294: #include <mach/exception.h>
3295: #include <mach/task.h>
3296: #include <pthread.h>
3297:
3298: /* These are not defined in any header, although they are documented */
3299: extern boolean_t exc_server(mach_msg_header_t *,mach_msg_header_t *);
3300: extern kern_return_t exception_raise(
3301: mach_port_t,mach_port_t,mach_port_t,
3302: exception_type_t,exception_data_t,mach_msg_type_number_t);
3303: extern kern_return_t exception_raise_state(
3304: mach_port_t,mach_port_t,mach_port_t,
3305: exception_type_t,exception_data_t,mach_msg_type_number_t,
3306: thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
3307: thread_state_t,mach_msg_type_number_t*);
3308: extern kern_return_t exception_raise_state_identity(
3309: mach_port_t,mach_port_t,mach_port_t,
3310: exception_type_t,exception_data_t,mach_msg_type_number_t,
3311: thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
3312: thread_state_t,mach_msg_type_number_t*);
3313:
3314:
3315: #define MAX_EXCEPTION_PORTS 16
3316:
3317: static mach_port_t GC_task_self;
3318:
3319: static struct {
3320: mach_msg_type_number_t count;
3321: exception_mask_t masks[MAX_EXCEPTION_PORTS];
3322: exception_handler_t ports[MAX_EXCEPTION_PORTS];
3323: exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
3324: thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
3325: } GC_old_exc_ports;
3326:
3327: static struct {
3328: mach_port_t exception;
3329: #if defined(THREADS)
3330: mach_port_t reply;
3331: #endif
3332: } GC_ports;
3333:
3334: typedef struct {
3335: mach_msg_header_t head;
3336: } GC_msg_t;
3337:
3338: typedef enum {
3339: GC_MP_NORMAL, GC_MP_DISCARDING, GC_MP_STOPPED
3340: } GC_mprotect_state_t;
3341:
3342: /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field,
3343: but it isn't documented. Use the source and see if they
3344: should be ok. */
3345: #define ID_STOP 1
3346: #define ID_RESUME 2
3347:
3348: /* These values are only used on the reply port */
3349: #define ID_ACK 3
3350:
3351: #if defined(THREADS)
3352:
3353: GC_mprotect_state_t GC_mprotect_state;
3354:
3355: /* The following should ONLY be called when the world is stopped */
3356: static void GC_mprotect_thread_notify(mach_msg_id_t id) {
3357: struct {
3358: GC_msg_t msg;
3359: mach_msg_trailer_t trailer;
3360: } buf;
3361: mach_msg_return_t r;
3362: /* remote, local */
3363: buf.msg.head.msgh_bits =
3364: MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
3365: buf.msg.head.msgh_size = sizeof(buf.msg);
3366: buf.msg.head.msgh_remote_port = GC_ports.exception;
3367: buf.msg.head.msgh_local_port = MACH_PORT_NULL;
3368: buf.msg.head.msgh_id = id;
3369:
3370: r = mach_msg(
3371: &buf.msg.head,
3372: MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_LARGE,
3373: sizeof(buf.msg),
3374: sizeof(buf),
3375: GC_ports.reply,
3376: MACH_MSG_TIMEOUT_NONE,
3377: MACH_PORT_NULL);
3378: if(r != MACH_MSG_SUCCESS)
3379: ABORT("mach_msg failed in GC_mprotect_thread_notify");
3380: if(buf.msg.head.msgh_id != ID_ACK)
3381: ABORT("invalid ack in GC_mprotect_thread_notify");
3382: }
3383:
3384: /* Should only be called by the mprotect thread */
3385: static void GC_mprotect_thread_reply() {
3386: GC_msg_t msg;
3387: mach_msg_return_t r;
3388: /* remote, local */
3389: msg.head.msgh_bits =
3390: MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
3391: msg.head.msgh_size = sizeof(msg);
3392: msg.head.msgh_remote_port = GC_ports.reply;
3393: msg.head.msgh_local_port = MACH_PORT_NULL;
3394: msg.head.msgh_id = ID_ACK;
3395:
3396: r = mach_msg(
3397: &msg.head,
3398: MACH_SEND_MSG,
3399: sizeof(msg),
3400: 0,
3401: MACH_PORT_NULL,
3402: MACH_MSG_TIMEOUT_NONE,
3403: MACH_PORT_NULL);
3404: if(r != MACH_MSG_SUCCESS)
3405: ABORT("mach_msg failed in GC_mprotect_thread_reply");
3406: }
3407:
3408: void GC_mprotect_stop() {
3409: GC_mprotect_thread_notify(ID_STOP);
3410: }
3411: void GC_mprotect_resume() {
3412: GC_mprotect_thread_notify(ID_RESUME);
3413: }
3414:
3415: #else /* !THREADS */
3416: /* The compiler should optimize away any GC_mprotect_state computations */
3417: #define GC_mprotect_state GC_MP_NORMAL
3418: #endif
3419:
3420: static void *GC_mprotect_thread(void *arg) {
3421: mach_msg_return_t r;
3422: /* These two structures contain some private kernel data. We don't need to
3423: access any of it so we don't bother defining a proper struct. The
3424: correct definitions are in the xnu source code. */
3425: struct {
3426: mach_msg_header_t head;
3427: char data[256];
3428: } reply;
3429: struct {
3430: mach_msg_header_t head;
3431: mach_msg_body_t msgh_body;
3432: char data[1024];
3433: } msg;
3434:
3435: mach_msg_id_t id;
3436:
3437: for(;;) {
3438: r = mach_msg(
3439: &msg.head,
3440: MACH_RCV_MSG|MACH_RCV_LARGE|
3441: (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
3442: 0,
3443: sizeof(msg),
3444: GC_ports.exception,
3445: GC_mprotect_state == GC_MP_DISCARDING ? 0 : MACH_MSG_TIMEOUT_NONE,
3446: MACH_PORT_NULL);
3447:
3448: id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
3449:
3450: #if defined(THREADS)
3451: if(GC_mprotect_state == GC_MP_DISCARDING) {
3452: if(r == MACH_RCV_TIMED_OUT) {
3453: GC_mprotect_state = GC_MP_STOPPED;
3454: GC_mprotect_thread_reply();
3455: continue;
3456: }
3457: if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
3458: ABORT("out of order mprotect thread request");
3459: }
3460: #endif
3461:
3462: if(r != MACH_MSG_SUCCESS) {
3463: GC_err_printf2("mach_msg failed with %d %s\n",
3464: (int)r,mach_error_string(r));
3465: ABORT("mach_msg failed");
3466: }
3467:
3468: switch(id) {
3469: #if defined(THREADS)
3470: case ID_STOP:
3471: if(GC_mprotect_state != GC_MP_NORMAL)
3472: ABORT("Called mprotect_stop when state wasn't normal");
3473: GC_mprotect_state = GC_MP_DISCARDING;
3474: break;
3475: case ID_RESUME:
3476: if(GC_mprotect_state != GC_MP_STOPPED)
3477: ABORT("Called mprotect_resume when state wasn't stopped");
3478: GC_mprotect_state = GC_MP_NORMAL;
3479: GC_mprotect_thread_reply();
3480: break;
3481: #endif /* THREADS */
3482: default:
3483: /* Handle the message (calls catch_exception_raise) */
3484: if(!exc_server(&msg.head,&reply.head))
3485: ABORT("exc_server failed");
3486: /* Send the reply */
3487: r = mach_msg(
3488: &reply.head,
3489: MACH_SEND_MSG,
3490: reply.head.msgh_size,
3491: 0,
3492: MACH_PORT_NULL,
3493: MACH_MSG_TIMEOUT_NONE,
3494: MACH_PORT_NULL);
3495: if(r != MACH_MSG_SUCCESS) {
3496: /* This will fail if the thread dies, but the thread shouldn't
3497: die... */
3498: #ifdef BROKEN_EXCEPTION_HANDLING
3499: GC_err_printf2(
3500: "mach_msg failed with %d %s while sending exc reply\n",
3501: (int)r,mach_error_string(r));
3502: #else
3503: ABORT("mach_msg failed while sending exception reply");
3504: #endif
3505: }
3506: } /* switch */
3507: } /* for(;;) */
3508: /* NOT REACHED */
3509: return NULL;
3510: }
3511:
3512: /* All this SIGBUS code shouldn't be necessary. All protection faults should
3513:    be going through the mach exception handler. However, it seems a SIGBUS is
3514: occasionally sent for some unknown reason. Even more odd, it seems to be
3515: meaningless and safe to ignore. */
3516: #ifdef BROKEN_EXCEPTION_HANDLING
3517:
3518: typedef void (* SIG_PF)();
3519: static SIG_PF GC_old_bus_handler;
3520:
3521: /* Updates to this aren't atomic, but the SIGBUSs seem pretty rare.
3522:    Even if this doesn't get updated properly, it isn't really a problem */
3523: static int GC_sigbus_count;
3524:
3525: static void GC_darwin_sigbus(int num,siginfo_t *sip,void *context) {
3526: if(num != SIGBUS) ABORT("Got a non-sigbus signal in the sigbus handler");
3527:
3528: /* Ugh... some seem safe to ignore, but too many in a row probably means
3529: trouble. GC_sigbus_count is reset for each mach exception that is
3530: handled */
3531: if(GC_sigbus_count >= 8) {
3532: ABORT("Got more than 8 SIGBUSs in a row!");
3533: } else {
3534: GC_sigbus_count++;
3535: GC_err_printf0("GC: WARNING: Ignoring SIGBUS.\n");
3536: }
3537: }
3538: #endif /* BROKEN_EXCEPTION_HANDLING */
3539:
3540: void GC_dirty_init() {
3541: kern_return_t r;
3542: mach_port_t me;
3543: pthread_t thread;
3544: pthread_attr_t attr;
3545: exception_mask_t mask;
3546:
3547: # ifdef PRINTSTATS
3548:     GC_printf0("Initializing mach/darwin mprotect virtual dirty bit "
3549: "implementation\n");
3550: # endif
3551: # ifdef BROKEN_EXCEPTION_HANDLING
3552: GC_err_printf0("GC: WARNING: Enabling workarounds for various darwin "
3553: "exception handling bugs.\n");
3554: # endif
3555: GC_dirty_maintained = TRUE;
3556: if (GC_page_size % HBLKSIZE != 0) {
3557: GC_err_printf0("Page size not multiple of HBLKSIZE\n");
3558: ABORT("Page size not multiple of HBLKSIZE");
3559: }
3560:
3561: GC_task_self = me = mach_task_self();
3562:
3563: r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.exception);
3564: if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (exception port)");
3565:
3566: r = mach_port_insert_right(me,GC_ports.exception,GC_ports.exception,
3567: MACH_MSG_TYPE_MAKE_SEND);
3568: if(r != KERN_SUCCESS)
3569: ABORT("mach_port_insert_right failed (exception port)");
3570:
3571: #if defined(THREADS)
3572: r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.reply);
3573: if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (reply port)");
3574: #endif
3575:
3576: /* The exceptions we want to catch */
3577: mask = EXC_MASK_BAD_ACCESS;
3578:
3579: r = task_get_exception_ports(
3580: me,
3581: mask,
3582: GC_old_exc_ports.masks,
3583: &GC_old_exc_ports.count,
3584: GC_old_exc_ports.ports,
3585: GC_old_exc_ports.behaviors,
3586: GC_old_exc_ports.flavors
3587: );
3588: if(r != KERN_SUCCESS) ABORT("task_get_exception_ports failed");
3589:
3590: r = task_set_exception_ports(
3591: me,
3592: mask,
3593: GC_ports.exception,
3594: EXCEPTION_DEFAULT,
3595: MACHINE_THREAD_STATE
3596: );
3597: if(r != KERN_SUCCESS) ABORT("task_set_exception_ports failed");
3598:
3599: if(pthread_attr_init(&attr) != 0) ABORT("pthread_attr_init failed");
3600: if(pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED) != 0)
3601:     ABORT("pthread_attr_setdetachstate failed");
3602:
3603: # undef pthread_create
3604: /* This will call the real pthread function, not our wrapper */
3605: if(pthread_create(&thread,&attr,GC_mprotect_thread,NULL) != 0)
3606: ABORT("pthread_create failed");
3607: pthread_attr_destroy(&attr);
3608:
3609:     /* Set up the SIGBUS handler for ignoring the meaningless SIGBUSs */
3610: #ifdef BROKEN_EXCEPTION_HANDLING
3611: {
3612: struct sigaction sa, oldsa;
3613:     sa.sa_sigaction = GC_darwin_sigbus; /* SA_SIGINFO-style handler */
3614: sigemptyset(&sa.sa_mask);
3615: sa.sa_flags = SA_RESTART|SA_SIGINFO;
3616: if(sigaction(SIGBUS,&sa,&oldsa) < 0) ABORT("sigaction");
3617: GC_old_bus_handler = (SIG_PF)oldsa.sa_handler;
3618: if (GC_old_bus_handler != SIG_DFL) {
3619: # ifdef PRINTSTATS
3620: GC_err_printf0("Replaced other SIGBUS handler\n");
3621: # endif
3622: }
3623: }
3624: #endif /* BROKEN_EXCEPTION_HANDLING */
3625: }
3626:
3627: /* The source code for Apple's GDB was used as a reference for the exception
3628:    forwarding code. This code is similar to the GDB code only because there is
3629: only one way to do it. */
3630: static kern_return_t GC_forward_exception(
3631: mach_port_t thread,
3632: mach_port_t task,
3633: exception_type_t exception,
3634: exception_data_t data,
3635: mach_msg_type_number_t data_count
3636: ) {
3637: int i;
3638: kern_return_t r;
3639: mach_port_t port;
3640: exception_behavior_t behavior;
3641: thread_state_flavor_t flavor;
3642:
3643: thread_state_data_t thread_state;
3644: mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
3645:
3646: for(i=0;i<GC_old_exc_ports.count;i++)
3647: if(GC_old_exc_ports.masks[i] & (1 << exception))
3648: break;
3649: if(i==GC_old_exc_ports.count) ABORT("No handler for exception!");
3650:
3651: port = GC_old_exc_ports.ports[i];
3652: behavior = GC_old_exc_ports.behaviors[i];
3653: flavor = GC_old_exc_ports.flavors[i];
3654:
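  /* Handlers registered with EXCEPTION_STATE or EXCEPTION_STATE_IDENTITY
     receive (and may modify) the faulting thread's register state, so we
     fetch it first and write back whatever the handler returns. */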
3655: if(behavior != EXCEPTION_DEFAULT) {
3656: r = thread_get_state(thread,flavor,thread_state,&thread_state_count);
3657: if(r != KERN_SUCCESS)
3658: ABORT("thread_get_state failed in forward_exception");
3659: }
3660:
3661: switch(behavior) {
3662: case EXCEPTION_DEFAULT:
3663: r = exception_raise(port,thread,task,exception,data,data_count);
3664: break;
3665: case EXCEPTION_STATE:
3666: r = exception_raise_state(port,thread,task,exception,data,
3667: data_count,&flavor,thread_state,thread_state_count,
3668: thread_state,&thread_state_count);
3669: break;
3670: case EXCEPTION_STATE_IDENTITY:
3671: r = exception_raise_state_identity(port,thread,task,exception,data,
3672: data_count,&flavor,thread_state,thread_state_count,
3673: thread_state,&thread_state_count);
3674: break;
3675: default:
3676: r = KERN_FAILURE; /* make gcc happy */
3677: ABORT("forward_exception: unknown behavior");
3678: break;
3679: }
3680:
3681: if(behavior != EXCEPTION_DEFAULT) {
3682: r = thread_set_state(thread,flavor,thread_state,thread_state_count);
3683: if(r != KERN_SUCCESS)
3684: ABORT("thread_set_state failed in forward_exception");
3685: }
3686:
3687: return r;
3688: }
3689:
3690: #define FWD() GC_forward_exception(thread,task,exception,code,code_count)
3691:
3692: /* This violates the namespace rules but there isn't anything that can be done
3693:    about it. The exception handling stuff is hard-coded to call this. */
3694: kern_return_t
3695: catch_exception_raise(
3696: mach_port_t exception_port,mach_port_t thread,mach_port_t task,
3697: exception_type_t exception,exception_data_t code,
3698: mach_msg_type_number_t code_count
3699: ) {
3700: kern_return_t r;
3701: char *addr;
3702: struct hblk *h;
3703: int i;
3704: #ifdef POWERPC
3705: thread_state_flavor_t flavor = PPC_EXCEPTION_STATE;
3706: mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE_COUNT;
3707: ppc_exception_state_t exc_state;
3708: #else
3709: # error FIXME for non-ppc darwin
3710: #endif
3711:
3712:
3713: if(exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
3714: #ifdef DEBUG_EXCEPTION_HANDLING
3715: /* We aren't interested, pass it on to the old handler */
3716: GC_printf3("Exception: 0x%x Code: 0x%x 0x%x in catch....\n",
3717: exception,
3718: code_count > 0 ? code[0] : -1,
3719: code_count > 1 ? code[1] : -1);
3720: #endif
3721: return FWD();
3722: }
3723:
3724: r = thread_get_state(thread,flavor,
3725: (natural_t*)&exc_state,&exc_state_count);
3726: if(r != KERN_SUCCESS) {
3727: /* The thread is supposed to be suspended while the exception handler
3728: is called. This shouldn't fail. */
3729: #ifdef BROKEN_EXCEPTION_HANDLING
3730: GC_err_printf0("thread_get_state failed in "
3731: "catch_exception_raise\n");
3732: return KERN_SUCCESS;
3733: #else
3734: ABORT("thread_get_state failed in catch_exception_raise");
3735: #endif
3736: }
3737:
3738: /* This is the address that caused the fault */
3739: addr = (char*) exc_state.dar;
3740:
3741: if((HDR(addr)) == 0) {
3742: /* Ugh... just like the SIGBUS problem above, it seems we get a bogus
3743:       KERN_PROTECTION_FAILURE every once in a while. We wait until we get
3744:       a bunch in a row before doing anything about it. If a "real" fault
3745:       ever occurs it'll just keep faulting over and over and we'll hit
3746: the limit pretty quickly. */
3747: #ifdef BROKEN_EXCEPTION_HANDLING
3748: static char *last_fault;
3749: static int last_fault_count;
3750:
3751: if(addr != last_fault) {
3752: last_fault = addr;
3753: last_fault_count = 0;
3754: }
3755: if(++last_fault_count < 32) {
3756: if(last_fault_count == 1)
3757: GC_err_printf1(
3758: "GC: WARNING: Ignoring KERN_PROTECTION_FAILURE at %p\n",
3759: addr);
3760: return KERN_SUCCESS;
3761: }
3762:
3763: GC_err_printf1("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
3764: /* Can't pass it along to the signal handler because that is
3765: ignoring SIGBUS signals. We also shouldn't call ABORT here as
3766: signals don't always work too well from the exception handler. */
3767: GC_err_printf0("Aborting\n");
3768: exit(EXIT_FAILURE);
3769: #else /* BROKEN_EXCEPTION_HANDLING */
3770: /* Pass it along to the next exception handler
3771: (which should call SIGBUS/SIGSEGV) */
3772: return FWD();
3773: #endif /* !BROKEN_EXCEPTION_HANDLING */
3774: }
3775:
3776: #ifdef BROKEN_EXCEPTION_HANDLING
3777: /* Reset the number of consecutive SIGBUSs */
3778: GC_sigbus_count = 0;
3779: #endif
3780:
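  /* Round the faulting address down to its page, unprotect that page,
     and record every heap block on it as dirty so the collector will
     rescan it. */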
3781: if(GC_mprotect_state == GC_MP_NORMAL) { /* common case */
3782: h = (struct hblk*)((word)addr & ~(GC_page_size-1));
3783: UNPROTECT(h, GC_page_size);
3784: for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3785: register int index = PHT_HASH(h+i);
3786: async_set_pht_entry_from_index(GC_dirty_pages, index);
3787: }
3788: } else if(GC_mprotect_state == GC_MP_DISCARDING) {
3789: /* Lie to the thread for now. No sense UNPROTECT()ing the memory
3790: when we're just going to PROTECT() it again later. The thread
3791: will just fault again once it resumes */
3792: } else {
3793:     /* Shouldn't happen, I don't think */
3794: GC_printf0("KERN_PROTECTION_FAILURE while world is stopped\n");
3795: return FWD();
3796: }
3797: return KERN_SUCCESS;
3798: }
3799: #undef FWD
3800:
3801: /* These should never be called, but just in case... */
3802: kern_return_t catch_exception_raise_state(mach_port_name_t exception_port,
3803: int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
3804: int flavor, thread_state_t old_state, int old_stateCnt,
3805: thread_state_t new_state, int new_stateCnt)
3806: {
3807: ABORT("catch_exception_raise_state");
3808: return(KERN_INVALID_ARGUMENT);
3809: }
3810: kern_return_t catch_exception_raise_state_identity(
3811: mach_port_name_t exception_port, mach_port_t thread, mach_port_t task,
3812: int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
3813: int flavor, thread_state_t old_state, int old_stateCnt,
3814: thread_state_t new_state, int new_stateCnt)
3815: {
3816: ABORT("catch_exception_raise_state_identity");
3817: return(KERN_INVALID_ARGUMENT);
3818: }
3819:
3820:
3821: #endif /* DARWIN && MPROTECT_VDB */
3822:
1.6 noro 3823: # ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
3824: int GC_incremental_protection_needs()
3825: {
3826: return GC_PROTECTS_NONE;
3827: }
3828: # endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
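
/* A minimal client-side sketch (illustration only, not part of this
   file) of how the GC_incremental_protection_needs() result might be
   consulted. It assumes only the GC_PROTECTS_* flags from gc.h; the
   client names are hypothetical. */
#if 0
#include "gc.h"

static int client_avoid_ptrfree_writes;	/* hypothetical client flag */

void client_check_protection(void)
{
    int needs = GC_incremental_protection_needs();

    /* If the collector may write-protect pointer-free objects too,   */
    /* avoid writing to GC_malloc_atomic() objects from contexts that */
    /* cannot tolerate (or transparently restart after) a page fault. */
    if (needs & GC_PROTECTS_PTRFREE_HEAP) client_avoid_ptrfree_writes = 1;
}
#endif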
3829:
1.1 noro 3830: /*
3831: * Call stack save code for debugging.
3832: * Should probably be in mach_dep.c, but that requires reorganization.
3833: */
1.4 noro 3834:
3835: /* I suspect the following works for most X86 *nix variants, so */
3836: /* long as the frame pointer is explicitly stored. In the case of gcc, */
3837: /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
3838: #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
1.6 noro 3839: # include <features.h>
3840:
1.4 noro 3841: struct frame {
3842: struct frame *fr_savfp;
3843: long fr_savpc;
3844: long fr_arg[NARGS]; /* All the arguments go here. */
3845: };
3846: #endif
3847:
3848: #if defined(SPARC)
3849: # if defined(LINUX)
1.6 noro 3850: # include <features.h>
3851:
1.4 noro 3852: struct frame {
3853: long fr_local[8];
3854: long fr_arg[6];
3855: struct frame *fr_savfp;
3856: long fr_savpc;
3857: # ifndef __arch64__
3858: char *fr_stret;
3859: # endif
3860: long fr_argd[6];
3861: long fr_argx[0];
3862: };
3863: # else
3864: # if defined(SUNOS4)
3865: # include <machine/frame.h>
3866: # else
3867: # if defined (DRSNX)
3868: # include <sys/sparc/frame.h>
3869: # else
3870: # if defined(OPENBSD) || defined(NETBSD)
3871: # include <frame.h>
3872: # else
3873: # include <sys/frame.h>
3874: # endif
3875: # endif
3876: # endif
3877: # endif
3878: # if NARGS > 6
1.1 noro 3879: 	--> We only know how to get the first 6 arguments
1.4 noro 3880: # endif
3881: #endif /* SPARC */
1.1 noro 3882:
1.6 noro 3883: #ifdef NEED_CALLINFO
1.1 noro 3884: /* Fill in the pc and argument information for up to NFRAMES of my */
3885: /* callers.  Ignore my frame and my caller's frame. */
3886:
1.6 noro 3887: #ifdef LINUX
3888: # include <unistd.h>
3889: #endif
3890:
3891: #endif /* NEED_CALLINFO */
3892:
3893: #ifdef SAVE_CALL_CHAIN
3894:
3895: #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
1.7 noro 3896: && defined(GC_HAVE_BUILTIN_BACKTRACE)
3897:
3898: #include <execinfo.h>
1.6 noro 3899:
3900: void GC_save_callers (info)
3901: struct callinfo info[NFRAMES];
3902: {
3903: void * tmp_info[NFRAMES + 1];
3904: int npcs, i;
3905: # define IGNORE_FRAMES 1
3906:
3907: /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
3908: /* points to our own frame. */
3909: GC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
3910: npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
3911: BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
3912: for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
3913: }
3914:
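/* For reference, a self-contained sketch (illustration only) of the
   glibc <execinfo.h> interface used above; show_stack is a hypothetical
   name. */
#if 0
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void show_stack(void)
{
    void *pcs[16];
    int n = backtrace(pcs, 16);	/* return addresses, innermost first */
    char **syms = backtrace_symbols(pcs, n); /* malloc'ed name strings */
    int i;

    if (syms == NULL) return;
    for (i = 0; i < n; i++) printf("%s\n", syms[i]);
    free(syms);
}
#endif
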
3915: #else /* No builtin backtrace; do it ourselves */
3916:
1.4 noro 3917: #if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
1.1 noro 3918: # define FR_SAVFP fr_fp
3919: # define FR_SAVPC fr_pc
3920: #else
3921: # define FR_SAVFP fr_savfp
3922: # define FR_SAVPC fr_savpc
3923: #endif
3924:
1.4 noro 3925: #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
3926: # define BIAS 2047
3927: #else
3928: # define BIAS 0
3929: #endif
3930:
1.1 noro 3931: void GC_save_callers (info)
3932: struct callinfo info[NFRAMES];
3933: {
3934: struct frame *frame;
3935: struct frame *fp;
3936: int nframes = 0;
1.4 noro 3937: # ifdef I386
3938: /* We assume this is turned on only with gcc as the compiler. */
3939: asm("movl %%ebp,%0" : "=r"(frame));
3940: fp = frame;
3941: # else
3942: frame = (struct frame *) GC_save_regs_in_stack ();
3943: fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
3944: #endif
1.1 noro 3945:
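   /* Walk the chain of saved frame pointers. Stop when the next frame */
   /* is hotter than the starting frame (a corrupt chain), when it     */
   /* passes GC_stackbottom, or after NFRAMES frames have been saved.  */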
1.4 noro 3946: for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
3947: && (nframes < NFRAMES));
3948: fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
1.1 noro 3949: register int i;
3950:
3951: info[nframes].ci_pc = fp->FR_SAVPC;
1.6 noro 3952: # if NARGS > 0
3953: for (i = 0; i < NARGS; i++) {
3954: info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
3955: }
3956: # endif /* NARGS > 0 */
1.1 noro 3957: }
3958: if (nframes < NFRAMES) info[nframes].ci_pc = 0;
3959: }
3960:
1.6 noro 3961: #endif /* No builtin backtrace */
3962:
1.1 noro 3963: #endif /* SAVE_CALL_CHAIN */
1.6 noro 3964:
3965: #ifdef NEED_CALLINFO
3966:
3967: /* Print info to stderr. We do NOT hold the allocation lock */
3968: void GC_print_callers (info)
3969: struct callinfo info[NFRAMES];
3970: {
3971: register int i;
3972: static int reentry_count = 0;
1.7 noro 3973: GC_bool stop = FALSE;
1.6 noro 3974:
3975: LOCK();
3976: ++reentry_count;
3977: UNLOCK();
3978:
3979: # if NFRAMES == 1
3980: GC_err_printf0("\tCaller at allocation:\n");
3981: # else
3982: GC_err_printf0("\tCall chain at allocation:\n");
3983: # endif
1.7 noro 3984: for (i = 0; i < NFRAMES && !stop ; i++) {
1.6 noro 3985: if (info[i].ci_pc == 0) break;
3986: # if NARGS > 0
3987: {
3988: int j;
3989:
3990: GC_err_printf0("\t\targs: ");
3991: for (j = 0; j < NARGS; j++) {
3992: if (j != 0) GC_err_printf0(", ");
3993: GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
3994: ~(info[i].ci_arg[j]));
3995: }
3996: GC_err_printf0("\n");
3997: }
3998: # endif
3999: if (reentry_count > 1) {
4000: /* We were called during an allocation during */
4001: /* a previous GC_print_callers call; punt. */
4002: GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
4003: continue;
4004: }
4005: {
4006: # ifdef LINUX
4007: FILE *pipe;
4008: # endif
1.7 noro 4009: # if defined(GC_HAVE_BUILTIN_BACKTRACE)
1.6 noro 4010: char **sym_name =
4011: backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
4012: char *name = sym_name[0];
4013: # else
4014: char buf[40];
4015: char *name = buf;
4016: sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
4017: # endif
1.7 noro 4018: # if defined(LINUX) && !defined(SMALL_CONFIG)
4019: /* Try for a line number. */
4020: {
1.6 noro 4021: # define EXE_SZ 100
4022: static char exe_name[EXE_SZ];
4023: # define CMD_SZ 200
4024: char cmd_buf[CMD_SZ];
4025: # define RESULT_SZ 200
4026: static char result_buf[RESULT_SZ];
4027: size_t result_len;
4028: static GC_bool found_exe_name = FALSE;
4029: static GC_bool will_fail = FALSE;
4030: int ret_code;
4031: /* Try to get it via a hairy and expensive scheme. */
4032: /* First we get the name of the executable: */
4033: if (will_fail) goto out;
4034: if (!found_exe_name) {
4035: ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
4036: if (ret_code < 0 || ret_code >= EXE_SZ
4037: || exe_name[0] != '/') {
4038: 			    will_fail = TRUE;	/* Don't try again. */
4039: goto out;
4040: }
4041: exe_name[ret_code] = '\0';
4042: found_exe_name = TRUE;
4043: }
4044: 			/* Then we use popen to run addr2line -f -e <exe> <addr>. */
4045: /* There are faster ways to do this, but hopefully this */
4046: /* isn't time critical. */
1.7 noro 4047: sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name,
1.6 noro 4048: (unsigned long)info[i].ci_pc);
4049: pipe = popen(cmd_buf, "r");
1.7 noro 4050: if (pipe == NULL
4051: || (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe))
4052: == 0) {
4053: if (pipe != NULL) pclose(pipe);
1.6 noro 4054: will_fail = TRUE;
4055: goto out;
4056: }
4057: if (result_buf[result_len - 1] == '\n') --result_len;
1.7 noro 4058: result_buf[result_len] = 0;
1.6 noro 4059: if (result_buf[0] == '?'
4060: || result_buf[result_len-2] == ':'
1.7 noro 4061: && result_buf[result_len-1] == '0') {
4062: pclose(pipe);
1.6 noro 4063: goto out;
1.7 noro 4064: }
4065: /* Get rid of embedded newline, if any. Test for "main" */
4066: {
4067: char * nl = strchr(result_buf, '\n');
4068: if (nl != NULL && nl < result_buf + result_len) {
4069: *nl = ':';
4070: }
4071: 			if (strncmp(result_buf, "main", nl == NULL ? result_len : (size_t)(nl - result_buf)) == 0) {
4072: stop = TRUE;
4073: }
4074: }
1.6 noro 4075: if (result_len < RESULT_SZ - 25) {
4076: /* Add in hex address */
4077: sprintf(result_buf + result_len, " [0x%lx]",
4078: (unsigned long)info[i].ci_pc);
4079: }
4080: name = result_buf;
4081: pclose(pipe);
1.7 noro 4082: out:;
1.6 noro 4083: }
4084: # endif /* LINUX */
4085: GC_err_printf1("\t\t%s\n", name);
1.7 noro 4086: # if defined(GC_HAVE_BUILTIN_BACKTRACE)
4087: free(sym_name); /* May call GC_free; that's OK */
4088: # endif
1.6 noro 4089: }
4090: }
4091: LOCK();
4092: --reentry_count;
4093: UNLOCK();
4094: }
4095:
4096: #endif /* NEED_CALLINFO */
1.1 noro 4097:
1.4 noro 4098:
4099:
4100: #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
4101:
4102: /* Dump /proc/self/maps to GC_stderr, to enable looking up names for
4103: addresses in FIND_LEAK output. */
4104:
1.7 noro 4105: static word dump_maps(char *maps)
4106: {
4107: GC_err_write(maps, strlen(maps));
4108: return 1;
4109: }
4110:
1.4 noro 4111: void GC_print_address_map()
4112: {
4113: GC_err_printf0("---------- Begin address map ----------\n");
1.7 noro 4114: GC_apply_to_maps(dump_maps);
1.4 noro 4115: GC_err_printf0("---------- End address map ----------\n");
4116: }
4117:
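/* A standalone sketch (illustration only) of dumping /proc/self/maps
   with plain stdio, for contexts where GC_apply_to_maps and the GC
   error machinery are unavailable; the function name is hypothetical. */
#if 0
#include <stdio.h>

static void print_address_map_stdio(void)
{
    FILE *f = fopen("/proc/self/maps", "r");
    char line[512];

    if (f == NULL) return;
    /* Each line: "start-end perms offset dev inode [path]". */
    while (fgets(line, sizeof(line), f) != NULL)
	fputs(line, stderr);
    fclose(f);
}
#endif
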
4118: #endif
1.1 noro 4119:
4120: