Annotation of OpenXM_contrib2/asir2000/gc5.3/dyn_load.c, Revision 1.2
1.1 noro 1: /*
2: * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
3: * Copyright (c) 1997 by Silicon Graphics. All rights reserved.
4: *
5: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
7: *
8: * Permission is hereby granted to use or copy this program
9: * for any purpose, provided the above notices are retained on all copies.
10: * Permission to modify the code and to distribute modified code is granted,
11: * provided the above notices are retained, and a notice that the code was
12: * modified is included with the above copyright notice.
13: *
14: * Original author: Bill Janssen
15: * Heavily modified by Hans Boehm and others
16: */
17:
18: /*
19: * This is incredibly OS specific code for tracking down data sections in
20: * dynamic libraries. There appears to be no way of doing this quickly
21: * without groveling through undocumented data structures. We would argue
22: * that this is a bug in the design of the dlopen interface. THIS CODE
23: * MAY BREAK IN FUTURE OS RELEASES. If this matters to you, don't hesitate
24: * to let your vendor know ...
25: *
26: * None of this is safe with dlclose and incremental collection.
27: * But then not much of anything is safe in the presence of dlclose.
28: */
29: #ifndef MACOS
30: # include <sys/types.h>
31: #endif
32: #include "gc_priv.h"
33:
34: /* BTL: avoid circular redefinition of dlopen if SOLARIS_THREADS defined */
35: # if (defined(LINUX_THREADS) || defined(SOLARIS_THREADS) \
36: || defined(HPUX_THREADS) || defined(IRIX_THREADS)) && defined(dlopen) \
37: && !defined(USE_LD_WRAP)
38: /* To support threads in Solaris, gc.h interposes on dlopen by */
39: /* defining "dlopen" to be "GC_dlopen", which is implemented below. */
40: /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the */
41: /* real system dlopen() in their implementation. We first remove */
42: /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
43: # undef dlopen
44: # define GC_must_restore_redefined_dlopen
45: # else
46: # undef GC_must_restore_redefined_dlopen
47: # endif
48:
49: #if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)
50: #if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
51: !defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
52: !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
53: !defined(RS6000) && !defined(SCO_ELF)
54: --> We only know how to find data segments of dynamic libraries for the
55: --> above. Additional SVR4 variants might not be too
56: --> hard to add.
57: #endif
58:
59: #include <stdio.h>
60: #ifdef SUNOS5DL
61: # include <sys/elf.h>
62: # include <dlfcn.h>
63: # include <link.h>
64: #endif
65: #ifdef SUNOS4
66: # include <dlfcn.h>
67: # include <link.h>
68: # include <a.out.h>
69: /* struct link_map field overrides */
70: # define l_next lm_next
71: # define l_addr lm_addr
72: # define l_name lm_name
73: #endif
74:
75:
76: #if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)
77:
78: #ifdef LINT
79: Elf32_Dyn _DYNAMIC;
80: #endif
81:
82: static struct link_map *
83: GC_FirstDLOpenedLinkMap()
84: {
85: extern Elf32_Dyn _DYNAMIC;
86: Elf32_Dyn *dp;
87: struct r_debug *r;
88: static struct link_map * cachedResult = 0;
89: static Elf32_Dyn *dynStructureAddr = 0;
90: /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */
91:
92: # ifdef SUNOS53_SHARED_LIB
93: /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
94: /* up properly in dynamically linked .so's. This means we have */
95: /* to use its value in the set of original object files loaded */
96: /* at program startup. */
97: if( dynStructureAddr == 0 ) {
98: void* startupSyms = dlopen(0, RTLD_LAZY);
99: dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
100: }
101: # else
102: dynStructureAddr = &_DYNAMIC;
103: # endif
104:
105: if( dynStructureAddr == 0) {
106: return(0);
107: }
108: if( cachedResult == 0 ) {
109: int tag;
110: for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
111: if( tag == DT_DEBUG ) {
112: struct link_map *lm
113: = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
114: if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
115: break;
116: }
117: }
118: }
119: return cachedResult;
120: }
121:
122: #endif /* SUNOS5DL ... */
123:
124: #if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)
125:
126: #ifdef LINT
127: struct link_dynamic _DYNAMIC;
128: #endif
129:
130: static struct link_map *
131: GC_FirstDLOpenedLinkMap()
132: {
133: extern struct link_dynamic _DYNAMIC;
134:
135: if( &_DYNAMIC == 0) {
136: return(0);
137: }
138: return(_DYNAMIC.ld_un.ld_1->ld_loaded);
139: }
140:
141: /* Return the address of the ld.so allocated common symbol */
142: /* with the least address, or 0 if none. */
143: static ptr_t GC_first_common()
144: {
145: ptr_t result = 0;
146: extern struct link_dynamic _DYNAMIC;
147: struct rtc_symb * curr_symbol;
148:
149: if( &_DYNAMIC == 0) {
150: return(0);
151: }
152: curr_symbol = _DYNAMIC.ldd -> ldd_cp;
153: for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
154: if (result == 0
155: || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
156: result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
157: }
158: }
159: return(result);
160: }
161:
162: #endif /* SUNOS4 ... */
163:
164: # if defined(LINUX_THREADS) || defined(SOLARIS_THREADS) \
165: || defined(HPUX_THREADS) || defined(IRIX_THREADS)
166: /* Make sure we're not in the middle of a collection, and make */
167: /* sure we don't start any. Returns previous value of GC_dont_gc. */
168: /* This is invoked prior to a dlopen call to avoid synchronization */
169: /* issues. We can't just acquire the allocation lock, since startup */
170: /* code in dlopen may try to allocate. */
171: /* This solution risks heap growth in the presence of many dlopen */
172: /* calls in either a multithreaded environment, or if the library */
173: /* initialization code allocates substantial amounts of GC'ed memory. */
174: /* But I don't know of a better solution. */
175: /* This can still deadlock if the client explicitly starts a GC */
176: /* during the dlopen. He shouldn't do that. */
177: static GC_bool disable_gc_for_dlopen()
178: {
179: GC_bool result;
180: LOCK();
181: result = GC_dont_gc;
182: while (GC_incremental && GC_collection_in_progress()) {
183: GC_collect_a_little_inner(1000);
184: }
185: GC_dont_gc = TRUE;
186: UNLOCK();
187: return(result);
188: }
189:
190: /* Redefine dlopen to guarantee mutual exclusion with */
191: /* GC_register_dynamic_libraries. */
192: /* Should probably happen for other operating systems, too. */
193:
194: #include <dlfcn.h>
195:
196: #ifdef USE_LD_WRAP
197: void * __wrap_dlopen(const char *path, int mode)
198: #else
199: void * GC_dlopen(path, mode)
200: GC_CONST char * path;
201: int mode;
202: #endif
203: {
204: void * result;
205: GC_bool dont_gc_save;
206:
207: # ifndef USE_PROC_FOR_LIBRARIES
208: dont_gc_save = disable_gc_for_dlopen();
209: # endif
210: # ifdef USE_LD_WRAP
211: result = __real_dlopen(path, mode);
212: # else
213: result = dlopen(path, mode);
214: # endif
215: # ifndef USE_PROC_FOR_LIBRARIES
216: GC_dont_gc = dont_gc_save;
217: # endif
218: return(result);
219: }
220: # endif /* SOLARIS_THREADS */
221:
222: /* BTL: added to fix circular dlopen definition if SOLARIS_THREADS defined */
223: # if defined(GC_must_restore_redefined_dlopen)
224: # define dlopen GC_dlopen
225: # endif
226:
227: # if defined(SUNOS4) || defined(SUNOS5DL)
228: /* Add dynamic library data sections to the root set. */
229: # if !defined(PCR) && !defined(SOLARIS_THREADS) && defined(THREADS)
230: # ifndef SRC_M3
231: --> fix mutual exclusion with dlopen
232: # endif /* We assume M3 programs don't call dlopen for now */
233: # endif
234:
235: # ifndef USE_PROC_FOR_LIBRARIES
236: void GC_register_dynamic_libraries()
237: {
238: struct link_map *lm = GC_FirstDLOpenedLinkMap();
239:
240:
241: for (lm = GC_FirstDLOpenedLinkMap();
242: lm != (struct link_map *) 0; lm = lm->l_next)
243: {
244: # ifdef SUNOS4
245: struct exec *e;
246:
247: e = (struct exec *) lm->lm_addr;
248: GC_add_roots_inner(
249: ((char *) (N_DATOFF(*e) + lm->lm_addr)),
250: ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
251: TRUE);
252: # endif
253: # ifdef SUNOS5DL
254: Elf32_Ehdr * e;
255: Elf32_Phdr * p;
256: unsigned long offset;
257: char * start;
258: register int i;
259:
260: e = (Elf32_Ehdr *) lm->l_addr;
261: p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
262: offset = ((unsigned long)(lm->l_addr));
263: for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
264: switch( p->p_type ) {
265: case PT_LOAD:
266: {
267: if( !(p->p_flags & PF_W) ) break;
268: start = ((char *)(p->p_vaddr)) + offset;
269: GC_add_roots_inner(
270: start,
271: start + p->p_memsz,
272: TRUE
273: );
274: }
275: break;
276: default:
277: break;
278: }
279: }
280: # endif
281: }
282: # ifdef SUNOS4
283: {
284: static ptr_t common_start = 0;
285: ptr_t common_end;
286: extern ptr_t GC_find_limit();
287:
288: if (common_start == 0) common_start = GC_first_common();
289: if (common_start != 0) {
290: common_end = GC_find_limit(common_start, TRUE);
291: GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
292: }
293: }
294: # endif
295: }
296:
297: # endif /* !USE_PROC ... */
298: # endif /* SUNOS */
299:
1.2 ! noro 300: #if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
! 301: (defined(NETBSD) && defined(__ELF__))
! 302:
! 303: #if 1
! 304: /* #ifdef USE_PROC_FOR_LIBRARIES */
! 305:
! 306: #include <string.h>
! 307:
! 308: #include <sys/stat.h>
! 309: #include <fcntl.h>
! 310: #include <unistd.h>
! 311:
! 312: #define MAPS_BUF_SIZE (32*1024)
! 313:
! 314: extern ssize_t GC_repeat_read(int fd, char *buf, size_t count);
! 315: /* Repeatedly read until buffer is filled, or EOF is encountered */
! 316: /* Defined in os_dep.c. */
! 317:
! 318: static char *parse_map_entry(char *buf_ptr, word *start, word *end,
! 319: char *prot_buf, unsigned int *maj_dev);
! 320:
! 321: /* Repeatedly perform a read call until the buffer is filled or */
! 322: /* we encounter EOF. */
! 323: ssize_t GC_repeat_read(int fd, char *buf, size_t count)
! 324: {
! 325: ssize_t num_read = 0;
! 326: ssize_t result;
! 327:
! 328: while (num_read < count) {
! 329: result = read(fd, buf + num_read, count - num_read);
! 330: if (result < 0) return result;
! 331: if (result == 0) break;
! 332: num_read += result;
! 333: }
! 334: return num_read;
! 335: }
! 336:
! 337: void GC_register_dynamic_libraries()
! 338: {
! 339: int f;
! 340: int result;
! 341: char prot_buf[5];
! 342: int maps_size;
! 343: char maps_temp[32768];
! 344: char *maps_buf;
! 345: char *buf_ptr;
! 346: int count;
! 347: word start, end;
! 348: unsigned int maj_dev, min_dev;
! 349: word least_ha, greatest_ha;
! 350: unsigned i;
! 351: word datastart = (word)(DATASTART);
! 352:
! 353: /* Read /proc/self/maps */
! 354: /* Note that we may not allocate, and thus can't use stdio. */
! 355: f = open("/proc/self/maps", O_RDONLY);
! 356: if (-1 == f) ABORT("Couldn't open /proc/self/maps");
! 357: /* stat() doesn't work for /proc/self/maps, so we have to
! 358: read it to find out how large it is... */
! 359: maps_size = 0;
! 360: do {
! 361: result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
! 362: if (result <= 0) ABORT("Couldn't read /proc/self/maps");
! 363: maps_size += result;
! 364: } while (result == sizeof(maps_temp));
! 365:
! 366: if (maps_size > sizeof(maps_temp)) {
! 367: /* If larger than our buffer, close and re-read it. */
! 368: close(f);
! 369: f = open("/proc/self/maps", O_RDONLY);
! 370: if (-1 == f) ABORT("Couldn't open /proc/self/maps");
! 371: maps_buf = alloca(maps_size);
! 372: if (NULL == maps_buf) ABORT("/proc/self/maps alloca failed");
! 373: result = GC_repeat_read(f, maps_buf, maps_size);
! 374: if (result <= 0) ABORT("Couldn't read /proc/self/maps");
! 375: } else {
! 376: /* Otherwise use the fixed size buffer */
! 377: maps_buf = maps_temp;
! 378: }
! 379:
! 380: close(f);
! 381: maps_buf[result] = '\0';
! 382: buf_ptr = maps_buf;
! 383: /* Compute heap bounds. Should be done by add_to_heap? */
! 384: least_ha = (word)(-1);
! 385: greatest_ha = 0;
! 386: for (i = 0; i < GC_n_heap_sects; ++i) {
! 387: word sect_start = (word)GC_heap_sects[i].hs_start;
! 388: word sect_end = sect_start + GC_heap_sects[i].hs_bytes;
! 389: if (sect_start < least_ha) least_ha = sect_start;
! 390: if (sect_end > greatest_ha) greatest_ha = sect_end;
! 391: }
! 392: if (greatest_ha < (word)GC_scratch_last_end_ptr)
! 393: greatest_ha = (word)GC_scratch_last_end_ptr;
! 394: for (;;) {
! 395:
! 396: buf_ptr = parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
! 397: if (buf_ptr == NULL) return;
! 398:
! 399: if (prot_buf[1] == 'w') {
! 400: /* This is a writable mapping. Add it to */
! 401: /* the root set unless it is already otherwise */
! 402: /* accounted for. */
! 403: if (start <= (word)GC_stackbottom && end >= (word)GC_stackbottom) {
! 404: /* Stack mapping; discard */
! 405: continue;
! 406: }
! 407: if (start <= datastart && end > datastart && maj_dev != 0) {
! 408: /* Main data segment; discard */
! 409: continue;
! 410: }
! 411: # ifdef THREADS
! 412: if (GC_segment_is_thread_stack(start, end)) continue;
! 413: # endif
! 414: /* The rest of this assumes that there is no mapping */
! 415: /* spanning the beginning of the data segment, or extending */
! 416: /* beyond the entire heap at both ends. */
! 417: /* Empirically these assumptions hold. */
! 418:
! 419: if (start < (word)DATAEND && end > (word)DATAEND) {
! 420: /* Rld may use space at the end of the main data */
! 421: /* segment. Thus we add that in. */
! 422: start = (word)DATAEND;
! 423: }
! 424: if (start < least_ha && end > least_ha) {
! 425: end = least_ha;
! 426: }
! 427: if (start < greatest_ha && end > greatest_ha) {
! 428: start = greatest_ha;
! 429: }
! 430: if (start >= least_ha && end <= greatest_ha) continue;
! 431: GC_add_roots_inner((char *)start, (char *)end, TRUE);
! 432: }
! 433: }
! 434: }
! 435:
! 436: //
! 437: // parse_map_entry parses an entry from /proc/self/maps so we can
! 438: // locate all writable data segments that belong to shared libraries.
! 439: // The format of one of these entries and the fields we care about
! 440: // is as follows:
! 441: // XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
! 442: // ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
! 443: // start end prot maj_dev
! 444: // 0 9 18 32
! 445: //
! 446: // The parser is called with a pointer to the entry and the return value
! 447: // is either NULL or is advanced to the next entry (the byte after the
! 448: // trailing '\n'.)
! 449: //
! 450: #define OFFSET_MAP_START 0
! 451: #define OFFSET_MAP_END 9
! 452: #define OFFSET_MAP_PROT 18
! 453: #define OFFSET_MAP_MAJDEV 32
! 454:
! 455: static char *parse_map_entry(char *buf_ptr, word *start, word *end,
! 456: char *prot_buf, unsigned int *maj_dev)
! 457: {
! 458: int i;
! 459: unsigned int val;
! 460: char *tok;
! 461:
! 462: if (buf_ptr == NULL || *buf_ptr == '\0') {
! 463: return NULL;
! 464: }
! 465:
! 466: memcpy(prot_buf, buf_ptr+OFFSET_MAP_PROT, 4); // do the protections first
! 467: prot_buf[4] = '\0';
! 468:
! 469: if (prot_buf[1] == 'w') { // we can skip all of this if it's not writable
! 470:
! 471: tok = buf_ptr;
! 472: buf_ptr[OFFSET_MAP_START+8] = '\0';
! 473: *start = strtoul(tok, NULL, 16);
! 474:
! 475: tok = buf_ptr+OFFSET_MAP_END;
! 476: buf_ptr[OFFSET_MAP_END+8] = '\0';
! 477: *end = strtoul(tok, NULL, 16);
! 478:
! 479: buf_ptr += OFFSET_MAP_MAJDEV;
! 480: tok = buf_ptr;
! 481: while (*buf_ptr != ':') buf_ptr++;
! 482: *buf_ptr++ = '\0';
! 483: *maj_dev = strtoul(tok, NULL, 16);
! 484: }
! 485:
! 486: while (*buf_ptr && *buf_ptr++ != '\n');
! 487:
! 488: return buf_ptr;
! 489: }
! 490:
! 491: #else /* !USE_PROC_FOR_LIBRARIES */
1.1 noro 492:
493: /* Dynamic loading code for Linux running ELF. Somewhat tested on
494: * Linux/x86, untested but hopefully should work on Linux/Alpha.
495: * This code was derived from the Solaris/ELF support. Thanks to
496: * whatever kind soul wrote that. - Patrick Bridges */
497:
1.2 ! noro 498: #if defined(NETBSD)
! 499: # include <sys/exec_elf.h>
! 500: #else
! 501: # include <elf.h>
! 502: #endif
1.1 noro 503: #include <link.h>
504:
505: /* Newer versions of Linux/Alpha and Linux/x86 define this macro. We
506: * define it for those older versions that don't. */
507: # ifndef ElfW
508: # if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
509: # define ElfW(type) Elf32_##type
510: # else
511: # define ElfW(type) Elf64_##type
512: # endif
513: # endif
514:
515: static struct link_map *
516: GC_FirstDLOpenedLinkMap()
517: {
518: # ifdef __GNUC__
519: # pragma weak _DYNAMIC
520: # endif
521: extern ElfW(Dyn) _DYNAMIC[];
522: ElfW(Dyn) *dp;
523: struct r_debug *r;
524: static struct link_map *cachedResult = 0;
525:
526: if( _DYNAMIC == 0) {
527: return(0);
528: }
529: if( cachedResult == 0 ) {
530: int tag;
531: for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
532: if( tag == DT_DEBUG ) {
533: struct link_map *lm
534: = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
535: if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
536: break;
537: }
538: }
539: }
540: return cachedResult;
541: }
542:
543:
544: void GC_register_dynamic_libraries()
545: {
546: struct link_map *lm = GC_FirstDLOpenedLinkMap();
547:
548:
549: for (lm = GC_FirstDLOpenedLinkMap();
550: lm != (struct link_map *) 0; lm = lm->l_next)
551: {
552: ElfW(Ehdr) * e;
553: ElfW(Phdr) * p;
554: unsigned long offset;
555: char * start;
556: register int i;
557:
558: e = (ElfW(Ehdr) *) lm->l_addr;
559: p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
560: offset = ((unsigned long)(lm->l_addr));
561: for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
562: switch( p->p_type ) {
563: case PT_LOAD:
564: {
565: if( !(p->p_flags & PF_W) ) break;
566: start = ((char *)(p->p_vaddr)) + offset;
567: GC_add_roots_inner(start, start + p->p_memsz, TRUE);
568: }
569: break;
570: default:
571: break;
572: }
573: }
574: }
575: }
576:
1.2 ! noro 577: #endif /* !USE_PROC_FOR_LIBRARIES */
! 578:
! 579: #endif /* LINUX */
1.1 noro 580:
581: #if defined(IRIX5) || defined(USE_PROC_FOR_LIBRARIES)
582:
583: #include <sys/procfs.h>
584: #include <sys/stat.h>
585: #include <fcntl.h>
586: #include <elf.h>
587: #include <errno.h>
588:
589: extern void * GC_roots_present();
590: /* The type is a lie, since the real type doesn't make sense here, */
591: /* and we only test for NULL. */
592:
593: #ifndef GC_scratch_last_end_ptr /* Never an extern any more? */
594: extern ptr_t GC_scratch_last_end_ptr; /* End of GC_scratch_alloc arena */
595: #endif
596:
597: /* We use /proc to track down all parts of the address space that are */
598: /* mapped by the process, and throw out regions we know we shouldn't */
599: /* worry about. This may also work under other SVR4 variants. */
600: void GC_register_dynamic_libraries()
601: {
602: static int fd = -1;
603: char buf[30];
604: static prmap_t * addr_map = 0;
605: static int current_sz = 0; /* Number of records currently in addr_map */
606: static int needed_sz; /* Required size of addr_map */
607: register int i;
608: register long flags;
609: register ptr_t start;
610: register ptr_t limit;
611: ptr_t heap_start = (ptr_t)HEAP_START;
612: ptr_t heap_end = heap_start;
613:
614: # ifdef SUNOS5DL
615: # define MA_PHYS 0
616: # endif /* SUNOS5DL */
617:
618: if (fd < 0) {
619: sprintf(buf, "/proc/%d", getpid());
620: /* The above generates a lint complaint, since pid_t varies. */
621: /* It's unclear how to improve this. */
622: fd = open(buf, O_RDONLY);
623: if (fd < 0) {
624: ABORT("/proc open failed");
625: }
626: }
627: if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
628: GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
629: ABORT("/proc PIOCNMAP ioctl failed");
630: }
631: if (needed_sz >= current_sz) {
632: current_sz = needed_sz * 2 + 1;
633: /* Expansion, plus room for 0 record */
634: addr_map = (prmap_t *)GC_scratch_alloc((word)
635: (current_sz * sizeof(prmap_t)));
636: }
637: if (ioctl(fd, PIOCMAP, addr_map) < 0) {
638: GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
639: fd, errno, needed_sz, addr_map);
640: ABORT("/proc PIOCMAP ioctl failed");
641: };
642: if (GC_n_heap_sects > 0) {
643: heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
644: + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
645: if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
646: }
647: for (i = 0; i < needed_sz; i++) {
648: flags = addr_map[i].pr_mflags;
649: if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
650: if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
651: goto irrelevant;
652: /* The latter test is empirically useless. Other than the */
653: /* main data and stack segments, everything appears to be */
654: /* mapped readable, writable, executable, and shared(!!). */
655: /* This makes no sense to me. - HB */
656: start = (ptr_t)(addr_map[i].pr_vaddr);
657: if (GC_roots_present(start)) goto irrelevant;
658: if (start < heap_end && start >= heap_start)
659: goto irrelevant;
660: # ifdef MMAP_STACKS
661: if (GC_is_thread_stack(start)) goto irrelevant;
662: # endif /* MMAP_STACKS */
663:
664: limit = start + addr_map[i].pr_size;
665: if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
666: /* Discard text segments, i.e. 0-offset mappings against */
667: /* executable files which appear to have ELF headers. */
668: caddr_t arg;
669: int obj;
670: # define MAP_IRR_SZ 10
671: static ptr_t map_irr[MAP_IRR_SZ];
672: /* Known irrelevant map entries */
673: static int n_irr = 0;
674: struct stat buf;
675: register int i;
676:
677: for (i = 0; i < n_irr; i++) {
678: if (map_irr[i] == start) goto irrelevant;
679: }
680: arg = (caddr_t)start;
681: obj = ioctl(fd, PIOCOPENM, &arg);
682: if (obj >= 0) {
683: fstat(obj, &buf);
684: close(obj);
685: if ((buf.st_mode & 0111) != 0) {
686: if (n_irr < MAP_IRR_SZ) {
687: map_irr[n_irr++] = start;
688: }
689: goto irrelevant;
690: }
691: }
692: }
693: GC_add_roots_inner(start, limit, TRUE);
694: irrelevant: ;
695: }
696: /* Don't keep cached descriptor, for now. Some kernels don't like us */
697: /* to keep a /proc file descriptor around during kill -9. */
698: if (close(fd) < 0) ABORT("Couldnt close /proc file");
699: fd = -1;
700: }
701:
702: # endif /* USE_PROC || IRIX5 */
703:
704: # ifdef MSWIN32
705:
706: # define WIN32_LEAN_AND_MEAN
707: # define NOSERVICE
708: # include <windows.h>
709: # include <stdlib.h>
710:
711: /* We traverse the entire address space and register all segments */
712: /* that could possibly have been written to. */
713: DWORD GC_allocation_granularity;
714:
715: extern GC_bool GC_is_heap_base (ptr_t p);
716:
717: # ifdef WIN32_THREADS
718: extern void GC_get_next_stack(char *start, char **lo, char **hi);
719: # endif
720:
721: void GC_cond_add_roots(char *base, char * limit)
722: {
723: char dummy;
724: char * stack_top
725: = (char *) ((word)(&dummy) & ~(GC_allocation_granularity-1));
726: if (base == limit) return;
727: # ifdef WIN32_THREADS
728: {
729: char * curr_base = base;
730: char * next_stack_lo;
731: char * next_stack_hi;
732:
733: for(;;) {
734: GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
735: if (next_stack_lo >= limit) break;
736: GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
737: curr_base = next_stack_hi;
738: }
739: if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
740: }
741: # else
742: if (limit > stack_top && base < GC_stackbottom) {
743: /* Part of the stack; ignore it. */
744: return;
745: }
746: GC_add_roots_inner(base, limit, TRUE);
747: # endif
748: }
749:
750: extern GC_bool GC_win32s;
751:
752: void GC_register_dynamic_libraries()
753: {
754: MEMORY_BASIC_INFORMATION buf;
755: SYSTEM_INFO sysinfo;
756: DWORD result;
757: DWORD protect;
758: LPVOID p;
759: char * base;
760: char * limit, * new_limit;
761:
762: if (GC_win32s) return;
763: GetSystemInfo(&sysinfo);
764: base = limit = p = sysinfo.lpMinimumApplicationAddress;
765: GC_allocation_granularity = sysinfo.dwAllocationGranularity;
766: while (p < sysinfo.lpMaximumApplicationAddress) {
767: result = VirtualQuery(p, &buf, sizeof(buf));
768: if (result != sizeof(buf)) {
769: ABORT("Weird VirtualQuery result");
770: }
771: new_limit = (char *)p + buf.RegionSize;
772: protect = buf.Protect;
773: if (buf.State == MEM_COMMIT
774: && (protect == PAGE_EXECUTE_READWRITE
775: || protect == PAGE_READWRITE)
776: && !GC_is_heap_base(buf.AllocationBase)) {
777: if ((char *)p == limit) {
778: limit = new_limit;
779: } else {
780: GC_cond_add_roots(base, limit);
781: base = p;
782: limit = new_limit;
783: }
784: }
785: if (p > (LPVOID)new_limit /* overflow */) break;
786: p = (LPVOID)new_limit;
787: }
788: GC_cond_add_roots(base, limit);
789: }
790:
791: #endif /* MSWIN32 */
792:
793: #if defined(ALPHA) && defined(OSF1)
794:
795: #include <loader.h>
796:
797: void GC_register_dynamic_libraries()
798: {
799: int status;
800: ldr_process_t mypid;
801:
802: /* module */
803: ldr_module_t moduleid = LDR_NULL_MODULE;
804: ldr_module_info_t moduleinfo;
805: size_t moduleinfosize = sizeof(moduleinfo);
806: size_t modulereturnsize;
807:
808: /* region */
809: ldr_region_t region;
810: ldr_region_info_t regioninfo;
811: size_t regioninfosize = sizeof(regioninfo);
812: size_t regionreturnsize;
813:
814: /* Obtain id of this process */
815: mypid = ldr_my_process();
816:
817: /* For each module */
818: while (TRUE) {
819:
820: /* Get the next (first) module */
821: status = ldr_next_module(mypid, &moduleid);
822:
823: /* Any more modules? */
824: if (moduleid == LDR_NULL_MODULE)
825: break; /* No more modules */
826:
827: /* Check status AFTER checking moduleid because */
828: /* of a bug in the non-shared ldr_next_module stub */
829: if (status != 0 ) {
830: GC_printf1("dynamic_load: status = %ld\n", (long)status);
831: {
832: extern char *sys_errlist[];
833: extern int sys_nerr;
834: extern int errno;
835: if (errno <= sys_nerr) {
836: GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
837: } else {
838: GC_printf1("dynamic_load: %d\n", (long)errno);
839: }
840: }
841: ABORT("ldr_next_module failed");
842: }
843:
844: /* Get the module information */
845: status = ldr_inq_module(mypid, moduleid, &moduleinfo,
846: moduleinfosize, &modulereturnsize);
847: if (status != 0 )
848: ABORT("ldr_inq_module failed");
849:
850: /* is module for the main program (i.e. nonshared portion)? */
851: if (moduleinfo.lmi_flags & LDR_MAIN)
852: continue; /* skip the main module */
853:
854: # ifdef VERBOSE
855: GC_printf("---Module---\n");
856: GC_printf("Module ID = %16ld\n", moduleinfo.lmi_modid);
857: GC_printf("Count of regions = %16d\n", moduleinfo.lmi_nregion);
858: GC_printf("flags for module = %16lx\n", moduleinfo.lmi_flags);
859: GC_printf("pathname of module = \"%s\"\n", moduleinfo.lmi_name);
860: # endif
861:
862: /* For each region in this module */
863: for (region = 0; region < moduleinfo.lmi_nregion; region++) {
864:
865: /* Get the region information */
866: status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
867: regioninfosize, &regionreturnsize);
868: if (status != 0 )
869: ABORT("ldr_inq_region failed");
870:
871: /* only process writable (data) regions */
872: if (! (regioninfo.lri_prot & LDR_W))
873: continue;
874:
875: # ifdef VERBOSE
876: GC_printf("--- Region ---\n");
877: GC_printf("Region number = %16ld\n",
878: regioninfo.lri_region_no);
879: GC_printf("Protection flags = %016x\n", regioninfo.lri_prot);
880: GC_printf("Virtual address = %16p\n", regioninfo.lri_vaddr);
881: GC_printf("Mapped address = %16p\n", regioninfo.lri_mapaddr);
882: GC_printf("Region size = %16ld\n", regioninfo.lri_size);
883: GC_printf("Region name = \"%s\"\n", regioninfo.lri_name);
884: # endif
885:
886: /* register region as a garbage collection root */
887: GC_add_roots_inner (
888: (char *)regioninfo.lri_mapaddr,
889: (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
890: TRUE);
891:
892: }
893: }
894: }
895: #endif
896:
897: #if defined(HPUX)
898:
899: #include <errno.h>
900: #include <dl.h>
901:
902: extern int errno;
903: extern char *sys_errlist[];
904: extern int sys_nerr;
905:
906: void GC_register_dynamic_libraries()
907: {
908: int status;
909: int index = 1; /* Ordinal position in shared library search list */
910: struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
911:
912: /* For each dynamic library loaded */
913: while (TRUE) {
914:
915: /* Get info about next shared library */
916: status = shl_get(index, &shl_desc);
917:
918: /* Check if this is the end of the list or if some error occurred */
919: if (status != 0) {
920: # ifdef HPUX_THREADS
921: /* I've seen errno values of 0. The man page is not clear */
922: /* as to whether errno should get set on a -1 return. */
923: break;
924: # else
925: if (errno == EINVAL) {
926: break; /* Moved past end of shared library list --> finished */
927: } else {
928: if (errno <= sys_nerr) {
929: GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
930: } else {
931: GC_printf1("dynamic_load: %d\n", (long) errno);
932: }
933: ABORT("shl_get failed");
934: }
935: # endif
936: }
937:
938: # ifdef VERBOSE
939: GC_printf0("---Shared library---\n");
940: GC_printf1("\tfilename = \"%s\"\n", shl_desc->filename);
941: GC_printf1("\tindex = %d\n", index);
942: GC_printf1("\thandle = %08x\n",
943: (unsigned long) shl_desc->handle);
944: GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
945: GC_printf1("\ttext seg. end = %08x\n", shl_desc->tend);
946: GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
947: GC_printf1("\tdata seg. end = %08x\n", shl_desc->dend);
948: GC_printf1("\tref. count = %lu\n", shl_desc->ref_count);
949: # endif
950:
951: /* register shared library's data segment as a garbage collection root */
952: GC_add_roots_inner((char *) shl_desc->dstart,
953: (char *) shl_desc->dend, TRUE);
954:
955: index++;
956: }
957: }
958: #endif /* HPUX */
959:
960: #ifdef RS6000
961: #pragma alloca
962: #include <sys/ldr.h>
963: #include <sys/errno.h>
964: void GC_register_dynamic_libraries()
965: {
966: int len;
967: char *ldibuf;
968: int ldibuflen;
969: struct ld_info *ldi;
970:
971: ldibuf = alloca(ldibuflen = 8192);
972:
973: while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
974: if (errno != ENOMEM) {
975: ABORT("loadquery failed");
976: }
977: ldibuf = alloca(ldibuflen *= 2);
978: }
979:
980: ldi = (struct ld_info *)ldibuf;
981: while (ldi) {
982: len = ldi->ldinfo_next;
983: GC_add_roots_inner(
984: ldi->ldinfo_dataorg,
985: (unsigned long)ldi->ldinfo_dataorg
986: + ldi->ldinfo_datasize,
987: TRUE);
988: ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
989: }
990: }
991: #endif /* RS6000 */
992:
993:
994:
995: #else /* !DYNAMIC_LOADING */
996:
997: #ifdef PCR
998:
999: # include "il/PCR_IL.h"
1000: # include "th/PCR_ThCtl.h"
1001: # include "mm/PCR_MM.h"
1002:
1003: void GC_register_dynamic_libraries()
1004: {
1005: /* Add new static data areas of dynamically loaded modules. */
1006: {
1007: PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
1008: PCR_IL_LoadedSegment * q;
1009:
1010: /* Skip uncommited files */
1011: while (p != NIL && !(p -> lf_commitPoint)) {
1012: /* The loading of this file has not yet been committed */
1013: /* Hence its description could be inconsistent. */
1014: /* Furthermore, it hasn't yet been run. Hence its data */
1015: /* segments can't possibly reference heap allocated */
1016: /* objects. */
1017: p = p -> lf_prev;
1018: }
1019: for (; p != NIL; p = p -> lf_prev) {
1020: for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
1021: if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
1022: == PCR_IL_SegFlags_Traced_on) {
1023: GC_add_roots_inner
1024: ((char *)(q -> ls_addr),
1025: (char *)(q -> ls_addr) + q -> ls_bytes,
1026: TRUE);
1027: }
1028: }
1029: }
1030: }
1031: }
1032:
1033:
1034: #else /* !PCR */
1035:
1036: void GC_register_dynamic_libraries(){}
1037:
1038: int GC_no_dynamic_loading;
1039:
1040: #endif /* !PCR */
1041: #endif /* !DYNAMIC_LOADING */
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>