Annotation of OpenXM_contrib/gc/solaris_threads.c, Revision 1.1.1.1
1.1 maekawa 1: /*
2: * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3: *
4: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
5: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
6: *
7: * Permission is hereby granted to use or copy this program
8: * for any purpose, provided the above notices are retained on all copies.
9: * Permission to modify the code and to distribute modified code is granted,
10: * provided the above notices are retained, and a notice that the code was
11: * modified is included with the above copyright notice.
12: */
13: /*
14: * Support code for Solaris threads. Provides functionality we wish Sun
15: * had provided. Relies on some information we probably shouldn't rely on.
16: */
17: /* Boehm, September 14, 1994 4:44 pm PDT */
18:
19: # if defined(SOLARIS_THREADS)
20:
21: # include "gc_priv.h"
22: # include "solaris_threads.h"
23: # include <thread.h>
24: # include <synch.h>
25: # include <signal.h>
26: # include <fcntl.h>
27: # include <sys/types.h>
28: # include <sys/mman.h>
29: # include <sys/time.h>
30: # include <sys/resource.h>
31: # include <sys/stat.h>
32: # include <sys/syscall.h>
33: # include <sys/procfs.h>
34: # include <sys/lwp.h>
35: # include <sys/reg.h>
36: # define _CLASSIC_XOPEN_TYPES
37: # include <unistd.h>
38: # include <errno.h>
39:
40: /*
41: * This is the default size of the LWP arrays. If there are more LWPs
42: * than this when a stop-the-world GC happens, set_max_lwps will be
43: * called to cope.
44: * This must be higher than the number of LWPs at startup time.
45: * The threads library creates a thread early on, so the min. is 3
46: */
47: # define DEFAULT_MAX_LWPS 4
48:
49: #undef thr_join
50: #undef thr_create
51: #undef thr_suspend
52: #undef thr_continue
53:
54: cond_t GC_prom_join_cv; /* Broadcast when any thread terminates */
55: cond_t GC_create_cv; /* Signalled when a new undetached */
56: /* thread starts. */
57:
58:
59: #ifdef MMAP_STACKS
60: static int GC_zfd;
61: #endif /* MMAP_STACKS */
62:
63: /* We use the allocation lock to protect thread-related data structures. */
64:
65: /* We stop the world using /proc primitives. This makes some */
66: /* minimal assumptions about the threads implementation. */
67: /* We don't play by the rules, since the rules make this */
68: /* impossible (as of Solaris 2.3). Also note that as of */
69: /* Solaris 2.3 the various thread and lwp suspension */
70: * primitives fail to stop threads by the time the request
71: * completes. */
72:
73:
74: static sigset_t old_mask;
75:
76: /* Sleep for n milliseconds, n < 1000 */
77: void GC_msec_sleep(int n)
78: {
79: struct timespec ts;
80:
81: ts.tv_sec = 0;
82: ts.tv_nsec = 1000000*n;
83: if (syscall(SYS_nanosleep, &ts, 0) < 0) {
84: ABORT("nanosleep failed");
85: }
86: }
87: /* Turn off preemption; gross but effective. */
88: /* Caller has allocation lock. */
89: /* Actually this is not needed under Solaris 2.3 and */
90: /* 2.4, but hopefully that'll change. */
91: void preempt_off()
92: {
93: sigset_t set;
94:
95: (void)sigfillset(&set);
96: sigdelset(&set, SIGABRT);
97: syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
98: }
99:
100: void preempt_on()
101: {
102: syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
103: }
104:
105: int GC_main_proc_fd = -1;
106:
107:
108: struct lwp_cache_entry {
109: lwpid_t lc_id;
110: int lc_descr; /* /proc file descriptor. */
111: } GC_lwp_cache_default[DEFAULT_MAX_LWPS];
112:
113: static int max_lwps = DEFAULT_MAX_LWPS;
114: static struct lwp_cache_entry *GC_lwp_cache = GC_lwp_cache_default;
115:
116: static prgregset_t GC_lwp_registers_default[DEFAULT_MAX_LWPS];
117: static prgregset_t *GC_lwp_registers = GC_lwp_registers_default;
118:
119: /* Return a file descriptor for the /proc entry corresponding */
120: /* to the given lwp. The file descriptor may be stale if the */
121: /* lwp exited and a new one was forked. */
122: static int open_lwp(lwpid_t id)
123: {
124: int result;
125: static int next_victim = 0;
126: register int i;
127:
128: for (i = 0; i < max_lwps; i++) {
129: if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
130: }
131: result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
132: /*
133: * If PIOCOPENLWP fails, try closing fds in the cache until it succeeds.
134: */
135: if (result < 0 && errno == EMFILE) {
136: for (i = 0; i < max_lwps; i++) {
137: if (GC_lwp_cache[i].lc_id != 0) {
138: (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
139: result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
140: if (result >= 0 || (result < 0 && errno != EMFILE))
141: break;
142: }
143: }
144: }
145: if (result < 0) {
146: if (errno == EMFILE) {
147: ABORT("Too many open files");
148: }
149: return(-1) /* exited? */;
150: }
151: if (GC_lwp_cache[next_victim].lc_id != 0)
152: (void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
153: GC_lwp_cache[next_victim].lc_id = id;
154: GC_lwp_cache[next_victim].lc_descr = result;
155: if (++next_victim >= max_lwps)
156: next_victim = 0;
157: return(result);
158: }
159:
160: static void uncache_lwp(lwpid_t id)
161: {
162: register int i;
163:
164: for (i = 0; i < max_lwps; i++) {
165: if (GC_lwp_cache[i].lc_id == id) {
166: (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
167: GC_lwp_cache[i].lc_id = 0;
168: break;
169: }
170: }
171: }
172: /* Sequence of current lwp ids */
173: static lwpid_t GC_current_ids_default[DEFAULT_MAX_LWPS + 1];
174: static lwpid_t *GC_current_ids = GC_current_ids_default;
175:
176: /* Temporary array used below (can be big if there are many LWPs) */
177: static lwpid_t last_ids_default[DEFAULT_MAX_LWPS + 1];
178: static lwpid_t *last_ids = last_ids_default;
179:
180:
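/* Round a request of n bytes up to a whole number of words, expressed in bytes. */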
181: #define ROUNDUP(n) WORDS_TO_BYTES(ROUNDED_UP_WORDS(n))
182:
183: static void set_max_lwps(GC_word n)
184: {
185: char *mem;
186: char *oldmem;
187: int required_bytes = ROUNDUP(n * sizeof(struct lwp_cache_entry))
188: + ROUNDUP(n * sizeof(prgregset_t))
189: + ROUNDUP((n + 1) * sizeof(lwpid_t))
190: + ROUNDUP((n + 1) * sizeof(lwpid_t));
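        /* The block is carved up, in order, into the lwp cache, the       */
        /* saved-register array, and the two (n+1)-entry id arrays         */
        /* (GC_current_ids and last_ids), each rounded to a word boundary. */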
191:
192: GC_expand_hp_inner(divHBLKSZ((word)required_bytes));
193: oldmem = mem = GC_scratch_alloc(required_bytes);
194: if (0 == mem) ABORT("No space for lwp data structures");
195:
196: /*
197: * We can either flush the old lwp cache or copy it over. Do the latter.
198: */
199: memcpy(mem, GC_lwp_cache, max_lwps * sizeof(struct lwp_cache_entry));
200: GC_lwp_cache = (struct lwp_cache_entry*)mem;
201: mem += ROUNDUP(n * sizeof(struct lwp_cache_entry));
202:
203: BZERO(GC_lwp_registers, max_lwps * sizeof(GC_lwp_registers[0]));
204: GC_lwp_registers = (prgregset_t *)mem;
205: mem += ROUNDUP(n * sizeof(prgregset_t));
206:
207:
208: GC_current_ids = (lwpid_t *)mem;
209: mem += ROUNDUP((n + 1) * sizeof(lwpid_t));
210:
211: last_ids = (lwpid_t *)mem;
212: mem += ROUNDUP((n + 1)* sizeof(lwpid_t));
213:
214: if (mem > oldmem + required_bytes)
215: ABORT("set_max_lwps buffer overflow");
216:
217: max_lwps = n;
218: }
219:
220:
221: /* Stop all lwps in process. Assumes preemption is off. */
222: /* Caller has allocation lock (and any other locks he may */
223: /* need). */
224: static void stop_all_lwps()
225: {
226: int lwp_fd;
227: char buf[30];
228: prstatus_t status;
229: register int i;
230: GC_bool changed;
231: lwpid_t me = _lwp_self();
232:
233: if (GC_main_proc_fd == -1) {
234: sprintf(buf, "/proc/%d", getpid());
235: GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
236: if (GC_main_proc_fd < 0) {
237: if (errno == EMFILE)
238: ABORT("/proc open failed: too many open files");
239: GC_printf1("/proc open failed: errno %d", errno);
240: abort();
241: }
242: }
243: BZERO(GC_lwp_registers, sizeof (prgregset_t) * max_lwps);
244: for (i = 0; i < max_lwps; i++)
245: last_ids[i] = 0;
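    /* Repeatedly snapshot the set of lwps and suspend any that were not  */
    /* seen on the previous pass; stop once a full pass finds no changes. */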
246: for (;;) {
247: if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
248: ABORT("Main PIOCSTATUS failed");
249: if (status.pr_nlwp < 1)
250: ABORT("Invalid number of lwps returned by PIOCSTATUS");
251: if (status.pr_nlwp >= max_lwps) {
252: set_max_lwps(status.pr_nlwp*2 + 10);
253: /*
254: * The data in the old GC_current_ids and
255: * GC_lwp_registers has been trashed. Cleaning out last_ids
256: * will make sure every LWP gets re-examined.
257: */
258: for (i = 0; i < max_lwps; i++)
259: last_ids[i] = 0;
260: continue;
261: }
262: if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0)
263: ABORT("PIOCLWPIDS failed");
264: changed = FALSE;
265: for (i = 0; GC_current_ids[i] != 0 && i < max_lwps; i++) {
266: if (GC_current_ids[i] != last_ids[i]) {
267: changed = TRUE;
268: if (GC_current_ids[i] != me) {
269: /* PIOCSTOP doesn't work without a writable */
270: /* descriptor. And that makes the process */
271: /* undebuggable. */
272: if (_lwp_suspend(GC_current_ids[i]) < 0) {
273: /* Could happen if the lwp exited */
274: uncache_lwp(GC_current_ids[i]);
275: GC_current_ids[i] = me; /* ignore */
276: }
277: }
278: }
279: }
280: /*
281: * In the unlikely event something does a fork between the
282: * PIOCSTATUS and the PIOCLWPIDS.
283: */
284: if (i >= max_lwps)
285: continue;
286: /* All lwps in GC_current_ids != me have been suspended. Note */
287: /* that _lwp_suspend is idempotent. */
288: for (i = 0; GC_current_ids[i] != 0; i++) {
289: if (GC_current_ids[i] != last_ids[i]) {
290: if (GC_current_ids[i] != me) {
291: lwp_fd = open_lwp(GC_current_ids[i]);
292: if (lwp_fd == -1)
293: {
294: GC_current_ids[i] = me;
295: continue;
296: }
297: /* LWP should be stopped. Empirically it sometimes */
298: /* isn't, and more frequently the PR_STOPPED flag */
299: /* is not set. Wait for PR_STOPPED. */
300: if (syscall(SYS_ioctl, lwp_fd,
301: PIOCSTATUS, &status) < 0) {
302: /* Possible if the descriptor was stale, or */
303: /* we encountered the 2.3 _lwp_suspend bug. */
304: uncache_lwp(GC_current_ids[i]);
305: GC_current_ids[i] = me; /* handle next time. */
306: } else {
307: while (!(status.pr_flags & PR_STOPPED)) {
308: GC_msec_sleep(1);
309: if (syscall(SYS_ioctl, lwp_fd,
310: PIOCSTATUS, &status) < 0) {
311: ABORT("Repeated PIOCSTATUS failed");
312: }
313: if (status.pr_flags & PR_STOPPED) break;
314:
315: GC_msec_sleep(20);
316: if (syscall(SYS_ioctl, lwp_fd,
317: PIOCSTATUS, &status) < 0) {
318: ABORT("Repeated PIOCSTATUS failed");
319: }
320: }
321: if (status.pr_who != GC_current_ids[i]) {
322: /* can happen if thread was on death row */
323: uncache_lwp(GC_current_ids[i]);
324: GC_current_ids[i] = me; /* handle next time. */
325: continue;
326: }
327: /* Save registers where collector can */
328: /* find them. */
329: BCOPY(status.pr_reg, GC_lwp_registers[i],
330: sizeof (prgregset_t));
331: }
332: }
333: }
334: }
335: if (!changed) break;
336: for (i = 0; i < max_lwps; i++) last_ids[i] = GC_current_ids[i];
337: }
338: }
339:
340: /* Restart all lwps in process. Assumes preemption is off. */
341: static void restart_all_lwps()
342: {
343: int lwp_fd;
344: register int i;
345: GC_bool changed;
346: lwpid_t me = _lwp_self();
347: # define PARANOID
348:
349: for (i = 0; GC_current_ids[i] != 0; i++) {
350: # ifdef PARANOID
351: if (GC_current_ids[i] != me) {
352: int lwp_fd = open_lwp(GC_current_ids[i]);
353: prstatus_t status;
354:
355: if (lwp_fd < 0) ABORT("open_lwp failed");
356: if (syscall(SYS_ioctl, lwp_fd,
357: PIOCSTATUS, &status) < 0) {
358: ABORT("PIOCSTATUS failed in restart_all_lwps");
359: }
360: if (memcmp(status.pr_reg, GC_lwp_registers[i],
361: sizeof (prgregset_t)) != 0) {
362: int j;
363:
364: for(j = 0; j < NGREG; j++)
365: {
366: GC_printf3("%i: %x -> %x\n", j,
367: GC_lwp_registers[i][j],
368: status.pr_reg[j]);
369: }
370: ABORT("Register contents changed");
371: }
372: if (!(status.pr_flags & PR_STOPPED)) {
373: ABORT("lwp no longer stopped");
374: }
375: #ifdef SPARC
376: {
377: gwindows_t windows;
378: if (syscall(SYS_ioctl, lwp_fd,
379: PIOCGWIN, &windows) < 0) {
380: ABORT("PIOCSTATUS failed in restart_all_lwps");
381: }
382: if (windows.wbcnt > 0) ABORT("unsaved register windows");
383: }
384: #endif
385: }
386: # endif /* PARANOID */
387: if (GC_current_ids[i] == me) continue;
388: if (_lwp_continue(GC_current_ids[i]) < 0) {
389: ABORT("Failed to restart lwp");
390: }
391: }
392: if (i >= max_lwps) ABORT("Too many lwps");
393: }
394:
395: GC_bool GC_multithreaded = 0;
396:
397: void GC_stop_world()
398: {
399: preempt_off();
400: if (GC_multithreaded)
401: stop_all_lwps();
402: }
403:
404: void GC_start_world()
405: {
406: if (GC_multithreaded)
407: restart_all_lwps();
408: preempt_on();
409: }
410:
411: void GC_thr_init(void);
412:
413: GC_bool GC_thr_initialized = FALSE;
414:
415: size_t GC_min_stack_sz;
416:
417: size_t GC_page_sz;
418:
419: /*
420: * stack_head is stored at the top of free stacks
421: */
422: struct stack_head {
423: struct stack_head *next;
424: ptr_t base;
425: thread_t owner;
426: };
427:
428: # define N_FREE_LISTS 25
429: struct stack_head *GC_stack_free_lists[N_FREE_LISTS] = { 0 };
430: /* GC_stack_free_lists[i] is free list for stacks of */
431: /* size GC_min_stack_sz*2**i. */
432: /* Free lists are linked through the stack_head stored at the top of each stack. */
433:
434: /* Return a stack of size at least *stack_size. *stack_size is */
435: /* replaced by the actual stack size. */
436: /* Caller holds allocation lock. */
437: ptr_t GC_stack_alloc(size_t * stack_size)
438: {
439: register size_t requested_sz = *stack_size;
440: register size_t search_sz = GC_min_stack_sz;
441: register int index = 0; /* = log2(search_sz/GC_min_stack_sz) */
442: register ptr_t base;
443: register struct stack_head *result;
444:
445: while (search_sz < requested_sz) {
446: search_sz *= 2;
447: index++;
448: }
449: if ((result = GC_stack_free_lists[index]) == 0
450: && (result = GC_stack_free_lists[index+1]) != 0) {
451: /* Try next size up. */
452: search_sz *= 2; index++;
453: }
454: if (result != 0) {
455: base = GC_stack_free_lists[index]->base;
456: GC_stack_free_lists[index] = GC_stack_free_lists[index]->next;
457: } else {
458: #ifdef MMAP_STACKS
459: base = (ptr_t)mmap(0, search_sz + GC_page_sz,
460: PROT_READ|PROT_WRITE, MAP_PRIVATE |MAP_NORESERVE,
461: GC_zfd, 0);
462: if (base == (ptr_t)-1)
463: {
464: *stack_size = 0;
465: return NULL;
466: }
467:
468: mprotect(base, GC_page_sz, PROT_NONE);
469: /* Should this use divHBLKSZ(search_sz + GC_page_sz) ? -- cf */
470: GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
471: base += GC_page_sz;
472:
473: #else
474: base = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz);
475: if (base == NULL)
476: {
477: *stack_size = 0;
478: return NULL;
479: }
480:
481: base = (ptr_t)(((word)base + GC_page_sz) & ~(GC_page_sz - 1));
482: /* Protect hottest page to detect overflow. */
483: # ifdef SOLARIS23_MPROTECT_BUG_FIXED
484: mprotect(base, GC_page_sz, PROT_NONE);
485: # endif
486: GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
487:
488: base += GC_page_sz;
489: #endif
490: }
491: *stack_size = search_sz;
492: return(base);
493: }
494:
495: /* Caller holds allocation lock. */
496: void GC_stack_free(ptr_t stack, size_t size)
497: {
498: register int index = 0;
499: register size_t search_sz = GC_min_stack_sz;
500: register struct stack_head *head;
501:
502: #ifdef MMAP_STACKS
503: /* Zero pointers */
504: mmap(stack, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED,
505: GC_zfd, 0);
506: #endif
507: while (search_sz < size) {
508: search_sz *= 2;
509: index++;
510: }
511: if (search_sz != size) ABORT("Bad stack size");
512:
513: head = (struct stack_head *)(stack + search_sz - sizeof(struct stack_head));
514: head->next = GC_stack_free_lists[index];
515: head->base = stack;
516: GC_stack_free_lists[index] = head;
517: }
518:
519: void GC_my_stack_limits();
520:
521: /* Notify virtual dirty bit implementation that known empty parts of */
522: /* stacks do not contain useful data. */
523: /* Caller holds allocation lock. */
524: void GC_old_stacks_are_fresh()
525: {
526: /* No point in doing this for MMAP stacks - and pointers are zero'd out */
527: /* by the mmap in GC_stack_free */
528: #ifndef MMAP_STACKS
529: register int i;
530: register struct stack_head *s;
531: register ptr_t p;
532: register size_t sz;
533: register struct hblk * h;
534: int dummy;
535:
536: for (i = 0, sz= GC_min_stack_sz; i < N_FREE_LISTS;
537: i++, sz *= 2) {
538: for (s = GC_stack_free_lists[i]; s != 0; s = s->next) {
539: p = s->base;
540: h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
541: if ((ptr_t)h == p) {
542: GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
543: } else {
544: GC_is_fresh((struct hblk *)p, divHBLKSZ(sz) - 1);
545: BZERO(p, (ptr_t)h - p);
546: }
547: }
548: }
549: #endif /* MMAP_STACKS */
550: GC_my_stack_limits();
551: }
552:
553: /* The set of all known threads. We intercept thread creation and */
554: /* joins. We never actually create detached threads. We allocate all */
555: /* new thread stacks ourselves. These allow us to maintain this */
556: /* data structure. */
557:
558: # define THREAD_TABLE_SZ 128 /* Must be power of 2 */
559: volatile GC_thread GC_threads[THREAD_TABLE_SZ];
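/* A thread with id t is kept on the chain GC_threads[t % THREAD_TABLE_SZ], */
/* linked through the next field of its GC_Thread_Rep.                      */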
560:
561: /* Add a thread to GC_threads. We assume it wasn't already there. */
562: /* Caller holds allocation lock. */
563: GC_thread GC_new_thread(thread_t id)
564: {
565: int hv = ((word)id) % THREAD_TABLE_SZ;
566: GC_thread result;
567: static struct GC_Thread_Rep first_thread;
568: static GC_bool first_thread_used = FALSE;
569:
570: if (!first_thread_used) {
571: result = &first_thread;
572: first_thread_used = TRUE;
573: /* Don't acquire allocation lock, since we may already hold it. */
574: } else {
575: result = (struct GC_Thread_Rep *)
576: GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
577: }
578: if (result == 0) return(0);
579: result -> id = id;
580: result -> next = GC_threads[hv];
581: GC_threads[hv] = result;
582: /* result -> finished = 0; */
583: (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
584: return(result);
585: }
586:
587: /* Delete a thread from GC_threads. We assume it is there. */
588: /* (The code intentionally traps if it wasn't.) */
589: /* Caller holds allocation lock. */
590: void GC_delete_thread(thread_t id)
591: {
592: int hv = ((word)id) % THREAD_TABLE_SZ;
593: register GC_thread p = GC_threads[hv];
594: register GC_thread prev = 0;
595:
596: while (p -> id != id) {
597: prev = p;
598: p = p -> next;
599: }
600: if (prev == 0) {
601: GC_threads[hv] = p -> next;
602: } else {
603: prev -> next = p -> next;
604: }
605: }
606:
607: /* Return the GC_thread corresponding to a given thread_t. */
608: /* Returns 0 if it's not there. */
609: /* Caller holds allocation lock. */
610: GC_thread GC_lookup_thread(thread_t id)
611: {
612: int hv = ((word)id) % THREAD_TABLE_SZ;
613: register GC_thread p = GC_threads[hv];
614:
615: while (p != 0 && p -> id != id) p = p -> next;
616: return(p);
617: }
618:
619: /* Notify dirty bit implementation of unused parts of my stack. */
620: /* Caller holds allocation lock. */
621: void GC_my_stack_limits()
622: {
623: int dummy;
624: register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
625: register GC_thread me = GC_lookup_thread(thr_self());
626: register size_t stack_size = me -> stack_size;
627: register ptr_t stack;
628:
629: if (stack_size == 0) {
630: /* original thread */
631: struct rlimit rl;
632:
633: if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
634: /* Empirically, what should be the stack page with lowest */
635: /* address is actually inaccessible. */
636: stack_size = ((word)rl.rlim_cur & ~(HBLKSIZE-1)) - GC_page_sz;
637: stack = GC_stackbottom - stack_size + GC_page_sz;
638: } else {
639: stack = me -> stack;
640: }
641: if (stack > hottest || stack + stack_size < hottest) {
642: ABORT("sp out of bounds");
643: }
644: GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
645: }
646:
647:
648: /* We hold allocation lock. We assume the world is stopped. */
649: void GC_push_all_stacks()
650: {
651: register int i;
652: register GC_thread p;
653: register ptr_t sp = GC_approx_sp();
654: register ptr_t bottom, top;
655: struct rlimit rl;
656:
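/* Push the stack section [bottom, top), consulting the page dirty    */
/* information when the dirty-bit implementation is active.           */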
657: # define PUSH(bottom,top) \
658: if (GC_dirty_maintained) { \
659: GC_push_dirty((bottom), (top), GC_page_was_ever_dirty, \
660: GC_push_all_stack); \
661: } else { \
662: GC_push_all_stack((bottom), (top)); \
663: }
664: GC_push_all_stack((ptr_t)GC_lwp_registers,
665: (ptr_t)GC_lwp_registers
666: + max_lwps * sizeof(GC_lwp_registers[0]));
667: for (i = 0; i < THREAD_TABLE_SZ; i++) {
668: for (p = GC_threads[i]; p != 0; p = p -> next) {
669: if (p -> stack_size != 0) {
670: bottom = p -> stack;
671: top = p -> stack + p -> stack_size;
672: } else {
673: /* The original stack. */
674: if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
675: bottom = GC_stackbottom - rl.rlim_cur + GC_page_sz;
676: top = GC_stackbottom;
677: }
678: if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
679: PUSH(bottom, top);
680: }
681: }
682: }
683:
684:
685: int GC_is_thread_stack(ptr_t addr)
686: {
687: register int i;
688: register GC_thread p;
689: register ptr_t bottom, top;
690: struct rlimit rl;
691:
692: for (i = 0; i < THREAD_TABLE_SZ; i++) {
693: for (p = GC_threads[i]; p != 0; p = p -> next) {
694: if (p -> stack_size != 0) {
695: if (p -> stack <= addr &&
696: addr < p -> stack + p -> stack_size)
697: return 1;
698: }
699: }
700: }
    return 0;
701: }
702:
703: /* The only thread that ever really performs a thr_join. */
704: void * GC_thr_daemon(void * dummy)
705: {
706: void *status;
707: thread_t departed;
708: register GC_thread t;
709: register int i;
710: register int result;
711:
712: for(;;) {
713: start:
714: result = thr_join((thread_t)0, &departed, &status);
715: LOCK();
716: if (result != 0) {
717: /* No more threads; wait for create. */
718: for (i = 0; i < THREAD_TABLE_SZ; i++) {
719: for (t = GC_threads[i]; t != 0; t = t -> next) {
720: if (!(t -> flags & (DETACHED | FINISHED))) {
721: UNLOCK();
722: goto start; /* Thread started just before we */
723: /* acquired the lock. */
724: }
725: }
726: }
727: cond_wait(&GC_create_cv, &GC_allocate_ml);
728: UNLOCK();
729: } else {
730: t = GC_lookup_thread(departed);
731: GC_multithreaded--;
732: if (!(t -> flags & CLIENT_OWNS_STACK)) {
733: GC_stack_free(t -> stack, t -> stack_size);
734: }
735: if (t -> flags & DETACHED) {
736: GC_delete_thread(departed);
737: } else {
738: t -> status = status;
739: t -> flags |= FINISHED;
740: cond_signal(&(t -> join_cv));
741: cond_broadcast(&GC_prom_join_cv);
742: }
743: UNLOCK();
744: }
745: }
746: }
747:
748: /* We hold the allocation lock, or caller ensures that 2 instances */
749: /* cannot be invoked concurrently. */
750: void GC_thr_init(void)
751: {
752: GC_thread t;
753: thread_t tid;
754:
755: if (GC_thr_initialized)
756: return;
757: GC_thr_initialized = TRUE;
758: GC_min_stack_sz = ((thr_min_stack() + 32*1024 + HBLKSIZE-1)
759: & ~(HBLKSIZE - 1));
760: GC_page_sz = sysconf(_SC_PAGESIZE);
761: #ifdef MMAP_STACKS
762: GC_zfd = open("/dev/zero", O_RDONLY);
763: if (GC_zfd == -1)
764: ABORT("Can't open /dev/zero");
765: #endif /* MMAP_STACKS */
766: cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
767: cond_init(&GC_create_cv, USYNC_THREAD, 0);
768: /* Add the initial thread, so we can stop it. */
769: t = GC_new_thread(thr_self());
770: t -> stack_size = 0;
771: t -> flags = DETACHED | CLIENT_OWNS_STACK;
772: if (thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
773: 0 /* arg */, THR_DETACHED | THR_DAEMON,
774: &tid /* thread_id */) != 0) {
775: ABORT("Cant fork daemon");
776: }
777: thr_setprio(tid, 126);
778: }
779:
780: /* We acquire the allocation lock to prevent races with */
781: /* stopping/starting world. */
782: /* This is no more correct than the underlying Solaris 2.X */
783: /* implementation. Under 2.3 THIS IS BROKEN. */
784: int GC_thr_suspend(thread_t target_thread)
785: {
786: GC_thread t;
787: int result;
788:
789: LOCK();
790: result = thr_suspend(target_thread);
791: if (result == 0) {
792: t = GC_lookup_thread(target_thread);
793: if (t == 0) ABORT("thread unknown to GC");
794: t -> flags |= SUSPENDED;
795: }
796: UNLOCK();
797: return(result);
798: }
799:
800: int GC_thr_continue(thread_t target_thread)
801: {
802: GC_thread t;
803: int result;
804:
805: LOCK();
806: result = thr_continue(target_thread);
807: if (result == 0) {
808: t = GC_lookup_thread(target_thread);
809: if (t == 0) ABORT("thread unknown to GC");
810: t -> flags &= ~SUSPENDED;
811: }
812: UNLOCK();
813: return(result);
814: }
815:
816: int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
817: {
818: register GC_thread t;
819: int result = 0;
820:
821: LOCK();
822: if (wait_for == 0) {
823: register int i;
824: register GC_bool thread_exists;
825:
826: for (;;) {
827: thread_exists = FALSE;
828: for (i = 0; i < THREAD_TABLE_SZ; i++) {
829: for (t = GC_threads[i]; t != 0; t = t -> next) {
830: if (!(t -> flags & DETACHED)) {
831: if (t -> flags & FINISHED) {
832: goto found;
833: }
834: thread_exists = TRUE;
835: }
836: }
837: }
838: if (!thread_exists) {
839: result = ESRCH;
840: goto out;
841: }
842: cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
843: }
844: } else {
845: t = GC_lookup_thread(wait_for);
846: if (t == 0 || t -> flags & DETACHED) {
847: result = ESRCH;
848: goto out;
849: }
850: if (wait_for == thr_self()) {
851: result = EDEADLK;
852: goto out;
853: }
854: while (!(t -> flags & FINISHED)) {
855: cond_wait(&(t -> join_cv), &GC_allocate_ml);
856: }
857:
858: }
859: found:
860: if (status) *status = t -> status;
861: if (departed) *departed = t -> id;
862: cond_destroy(&(t -> join_cv));
863: GC_delete_thread(t -> id);
864: out:
865: UNLOCK();
866: return(result);
867: }
868:
869:
870: int
871: GC_thr_create(void *stack_base, size_t stack_size,
872: void *(*start_routine)(void *), void *arg, long flags,
873: thread_t *new_thread)
874: {
875: int result;
876: GC_thread t;
877: thread_t my_new_thread;
878: word my_flags = 0;
879: void * stack = stack_base;
880:
881: LOCK();
882: if (!GC_thr_initialized)
883: {
884: GC_thr_init();
885: }
886: GC_multithreaded++;
887: if (stack == 0) {
888: if (stack_size == 0) stack_size = GC_min_stack_sz;
889: stack = (void *)GC_stack_alloc(&stack_size);
890: if (stack == 0) {
891: GC_multithreaded--;
892: UNLOCK();
893: return(ENOMEM);
894: }
895: } else {
896: my_flags |= CLIENT_OWNS_STACK;
897: }
898: if (flags & THR_DETACHED) my_flags |= DETACHED;
899: if (flags & THR_SUSPENDED) my_flags |= SUSPENDED;
900: result = thr_create(stack, stack_size, start_routine,
901: arg, flags & ~THR_DETACHED, &my_new_thread);
902: if (result == 0) {
903: t = GC_new_thread(my_new_thread);
904: t -> flags = my_flags;
905: if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
906: t -> stack = stack;
907: t -> stack_size = stack_size;
908: if (new_thread != 0) *new_thread = my_new_thread;
909: cond_signal(&GC_create_cv);
910: } else {
911: GC_multithreaded--;
912: if (!(my_flags & CLIENT_OWNS_STACK)) {
913: GC_stack_free(stack, stack_size);
914: }
915: }
916: UNLOCK();
917: return(result);
918: }
919:
920: # else /* SOLARIS_THREADS */
921:
922: #ifndef LINT
923: int GC_no_sunOS_threads;
924: #endif
925: #endif