Annotation of OpenXM_contrib/gc/solaris_threads.c, Revision 1.1.1.3
1.1 maekawa 1: /*
2: * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3: *
4: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
5: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
6: *
7: * Permission is hereby granted to use or copy this program
8: * for any purpose, provided the above notices are retained on all copies.
9: * Permission to modify the code and to distribute modified code is granted,
10: * provided the above notices are retained, and a notice that the code was
11: * modified is included with the above copyright notice.
12: */
13: /*
14: * Support code for Solaris threads. Provides functionality we wish Sun
15: * had provided. Relies on some information we probably shouldn't rely on.
16: */
17: /* Boehm, September 14, 1994 4:44 pm PDT */
18:
19: # if defined(SOLARIS_THREADS)
20:
21: # include "gc_priv.h"
22: # include "solaris_threads.h"
23: # include <thread.h>
24: # include <synch.h>
25: # include <signal.h>
26: # include <fcntl.h>
27: # include <sys/types.h>
28: # include <sys/mman.h>
29: # include <sys/time.h>
30: # include <sys/resource.h>
31: # include <sys/stat.h>
32: # include <sys/syscall.h>
33: # include <sys/procfs.h>
34: # include <sys/lwp.h>
35: # include <sys/reg.h>
36: # define _CLASSIC_XOPEN_TYPES
37: # include <unistd.h>
38: # include <errno.h>
39:
40: /*
41: * This is the default size of the LWP arrays. If there are more LWPs
42: * than this when a stop-the-world GC happens, set_max_lwps will be
43: * called to cope.
44: * This must be higher than the number of LWPs at startup time.
45: * The threads library creates a thread early on, so the min. is 3
46: */
47: # define DEFAULT_MAX_LWPS 4
48:
49: #undef thr_join
50: #undef thr_create
51: #undef thr_suspend
52: #undef thr_continue
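/* gc.h is expected to redirect client calls to thr_create, thr_join, */
/* thr_suspend and thr_continue to the GC_thr_* wrappers defined below; */
/* the #undefs above let this file reach the real Solaris primitives. */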
53:
54: cond_t GC_prom_join_cv; /* Broadcast when any thread terminates */
55: cond_t GC_create_cv; /* Signalled when a new undetached */
56: /* thread starts. */
57:
58:
59: #ifdef MMAP_STACKS
60: static int GC_zfd;
61: #endif /* MMAP_STACKS */
62:
63: /* We use the allocation lock to protect thread-related data structures. */
64:
65: /* We stop the world using /proc primitives. This makes some */
66: /* minimal assumptions about the threads implementation. */
67: /* We don't play by the rules, since the rules make this */
68: /* impossible (as of Solaris 2.3). Also note that as of */
69: /* Solaris 2.3 the various thread and lwp suspension */
70: /* primitives failed to stop threads by the time the request */
71: /* completed. */
72:
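/* Outline of the mechanism implemented below: stop_all_lwps() opens */
/* /proc/<pid>, uses PIOCSTATUS to learn how many lwps exist (growing the */
/* lwp arrays via set_max_lwps if needed), enumerates them with PIOCLWPIDS, */
/* _lwp_suspend()s every lwp except the caller, waits for each to report */
/* PR_STOPPED, and records its registers for the collector; it repeats */
/* until the id list stops changing. restart_all_lwps() undoes this with */
/* _lwp_continue(). */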
73:
74: static sigset_t old_mask;
75:
76: /* Sleep for n milliseconds, n < 1000 */
77: void GC_msec_sleep(int n)
78: {
79: struct timespec ts;
80:
81: ts.tv_sec = 0;
82: ts.tv_nsec = 1000000*n;
83: if (syscall(SYS_nanosleep, &ts, 0) < 0) {
84: ABORT("nanosleep failed");
85: }
86: }
87: /* Turn off preemption; gross but effective. */
88: /* Caller has allocation lock. */
89: /* Actually this is not needed under Solaris 2.3 and */
90: /* 2.4, but hopefully that'll change. */
91: void preempt_off()
92: {
93: sigset_t set;
94:
95: (void)sigfillset(&set);
96: sigdelset(&set, SIGABRT);
97: syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
98: }
99:
100: void preempt_on()
101: {
102: syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
103: }
104:
105: int GC_main_proc_fd = -1;
106:
107:
108: struct lwp_cache_entry {
109: lwpid_t lc_id;
110: int lc_descr; /* /proc file descriptor. */
111: } GC_lwp_cache_default[DEFAULT_MAX_LWPS];
112:
113: static int max_lwps = DEFAULT_MAX_LWPS;
114: static struct lwp_cache_entry *GC_lwp_cache = GC_lwp_cache_default;
115:
116: static prgregset_t GC_lwp_registers_default[DEFAULT_MAX_LWPS];
117: static prgregset_t *GC_lwp_registers = GC_lwp_registers_default;
118:
119: /* Return a file descriptor for the /proc entry corresponding */
120: /* to the given lwp. The file descriptor may be stale if the */
121: /* lwp exited and a new one was forked. */
122: static int open_lwp(lwpid_t id)
123: {
124: int result;
125: static int next_victim = 0;
126: register int i;
127:
128: for (i = 0; i < max_lwps; i++) {
129: if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
130: }
131: result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
132: /*
133: * If PIOCOPENLWP fails, try closing fds in the cache until it succeeds.
134: */
135: if (result < 0 && errno == EMFILE) {
136: for (i = 0; i < max_lwps; i++) {
137: if (GC_lwp_cache[i].lc_id != 0) {
138: (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
139: result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
140: if (result >= 0 || (result < 0 && errno != EMFILE))
141: break;
142: }
143: }
144: }
145: if (result < 0) {
146: if (errno == EMFILE) {
147: ABORT("Too many open files");
148: }
149: return(-1) /* exited? */;
150: }
151: if (GC_lwp_cache[next_victim].lc_id != 0)
152: (void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
153: GC_lwp_cache[next_victim].lc_id = id;
154: GC_lwp_cache[next_victim].lc_descr = result;
155: if (++next_victim >= max_lwps)
156: next_victim = 0;
157: return(result);
158: }
159:
160: static void uncache_lwp(lwpid_t id)
161: {
162: register int i;
163:
164: for (i = 0; i < max_lwps; i++) {
165: if (GC_lwp_cache[i].lc_id == id) {
166: (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
167: GC_lwp_cache[i].lc_id = 0;
168: break;
169: }
170: }
171: }
172: /* Sequence of current lwp ids */
173: static lwpid_t GC_current_ids_default[DEFAULT_MAX_LWPS + 1];
174: static lwpid_t *GC_current_ids = GC_current_ids_default;
175:
176: /* Temporary used below (can be big if large number of LWPs) */
177: static lwpid_t last_ids_default[DEFAULT_MAX_LWPS + 1];
178: static lwpid_t *last_ids = last_ids_default;
179:
180:
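/* Round a byte count up to a whole number of words, so that the */
/* consecutive pieces carved out of one scratch allocation in */
/* set_max_lwps() below stay word aligned. */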
181: #define ROUNDUP(n) WORDS_TO_BYTES(ROUNDED_UP_WORDS(n))
182:
183: static void set_max_lwps(GC_word n)
184: {
185: char *mem;
186: char *oldmem;
187: int required_bytes = ROUNDUP(n * sizeof(struct lwp_cache_entry))
188: + ROUNDUP(n * sizeof(prgregset_t))
189: + ROUNDUP((n + 1) * sizeof(lwpid_t))
190: + ROUNDUP((n + 1) * sizeof(lwpid_t));
191:
192: GC_expand_hp_inner(divHBLKSZ((word)required_bytes));
193: oldmem = mem = GC_scratch_alloc(required_bytes);
194: if (0 == mem) ABORT("No space for lwp data structures");
195:
196: /*
197: * We can either flush the old lwp cache or copy it over. Do the latter.
198: */
199: memcpy(mem, GC_lwp_cache, max_lwps * sizeof(struct lwp_cache_entry));
200: GC_lwp_cache = (struct lwp_cache_entry*)mem;
201: mem += ROUNDUP(n * sizeof(struct lwp_cache_entry));
202:
203: BZERO(GC_lwp_registers, max_lwps * sizeof(GC_lwp_registers[0]));
204: GC_lwp_registers = (prgregset_t *)mem;
205: mem += ROUNDUP(n * sizeof(prgregset_t));
206:
207:
208: GC_current_ids = (lwpid_t *)mem;
209: mem += ROUNDUP((n + 1) * sizeof(lwpid_t));
210:
211: last_ids = (lwpid_t *)mem;
212: mem += ROUNDUP((n + 1)* sizeof(lwpid_t));
213:
214: if (mem > oldmem + required_bytes)
215: ABORT("set_max_lwps buffer overflow");
216:
217: max_lwps = n;
218: }
219:
220:
221: /* Stop all lwps in process. Assumes preemption is off. */
222: /* Caller has allocation lock (and any other locks he may */
223: /* need). */
224: static void stop_all_lwps()
225: {
226: int lwp_fd;
227: char buf[30];
228: prstatus_t status;
229: register int i;
230: GC_bool changed;
231: lwpid_t me = _lwp_self();
232:
233: if (GC_main_proc_fd == -1) {
234: sprintf(buf, "/proc/%d", getpid());
235: GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
236: if (GC_main_proc_fd < 0) {
237: if (errno == EMFILE)
238: ABORT("/proc open failed: too many open files");
239: GC_printf1("/proc open failed: errno %d", errno);
240: abort();
241: }
242: }
243: BZERO(GC_lwp_registers, sizeof (prgregset_t) * max_lwps);
244: for (i = 0; i < max_lwps; i++)
245: last_ids[i] = 0;
246: for (;;) {
247: if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
248: ABORT("Main PIOCSTATUS failed");
249: if (status.pr_nlwp < 1)
250: ABORT("Invalid number of lwps returned by PIOCSTATUS");
251: if (status.pr_nlwp >= max_lwps) {
252: set_max_lwps(status.pr_nlwp*2 + 10);
253: /*
254: * The data in the old GC_current_ids and
255: * GC_lwp_registers has been trashed. Cleaning out last_ids
256: * will make sure every LWP gets re-examined.
257: */
258: for (i = 0; i < max_lwps; i++)
259: last_ids[i] = 0;
260: continue;
261: }
262: if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0)
263: ABORT("PIOCLWPIDS failed");
264: changed = FALSE;
265: for (i = 0; GC_current_ids[i] != 0 && i < max_lwps; i++) {
266: if (GC_current_ids[i] != last_ids[i]) {
267: changed = TRUE;
268: if (GC_current_ids[i] != me) {
269: /* PIOCSTOP doesn't work without a writable */
270: /* descriptor. And that makes the process */
271: /* undebuggable. */
272: if (_lwp_suspend(GC_current_ids[i]) < 0) {
273: /* Could happen if the lwp exited */
274: uncache_lwp(GC_current_ids[i]);
275: GC_current_ids[i] = me; /* ignore */
276: }
277: }
278: }
279: }
280: /*
281: * In the unlikely event something does a fork between the
282: * PIOCSTATUS and the PIOCLWPIDS.
283: */
284: if (i >= max_lwps)
285: continue;
286: /* All lwps in GC_current_ids != me have been suspended. Note */
287: /* that _lwp_suspend is idempotent. */
288: for (i = 0; GC_current_ids[i] != 0; i++) {
289: if (GC_current_ids[i] != last_ids[i]) {
290: if (GC_current_ids[i] != me) {
291: lwp_fd = open_lwp(GC_current_ids[i]);
292: if (lwp_fd == -1)
293: {
294: GC_current_ids[i] = me;
295: continue;
296: }
297: /* LWP should be stopped. Empirically it sometimes */
298: /* isn't, and more frequently the PR_STOPPED flag */
299: /* is not set. Wait for PR_STOPPED. */
300: if (syscall(SYS_ioctl, lwp_fd,
301: PIOCSTATUS, &status) < 0) {
302: /* Possible if the descriptor was stale, or */
303: /* we encountered the 2.3 _lwp_suspend bug. */
304: uncache_lwp(GC_current_ids[i]);
305: GC_current_ids[i] = me; /* handle next time. */
306: } else {
307: while (!(status.pr_flags & PR_STOPPED)) {
308: GC_msec_sleep(1);
309: if (syscall(SYS_ioctl, lwp_fd,
310: PIOCSTATUS, &status) < 0) {
311: ABORT("Repeated PIOCSTATUS failed");
312: }
313: if (status.pr_flags & PR_STOPPED) break;
314:
315: GC_msec_sleep(20);
316: if (syscall(SYS_ioctl, lwp_fd,
317: PIOCSTATUS, &status) < 0) {
318: ABORT("Repeated PIOCSTATUS failed");
319: }
320: }
321: if (status.pr_who != GC_current_ids[i]) {
322: /* can happen if thread was on death row */
323: uncache_lwp(GC_current_ids[i]);
324: GC_current_ids[i] = me; /* handle next time. */
325: continue;
326: }
327: /* Save registers where collector can */
328: /* find them. */
329: BCOPY(status.pr_reg, GC_lwp_registers[i],
330: sizeof (prgregset_t));
331: }
332: }
333: }
334: }
335: if (!changed) break;
336: for (i = 0; i < max_lwps; i++) last_ids[i] = GC_current_ids[i];
337: }
338: }
339:
340: /* Restart all lwps in process. Assumes preemption is off. */
341: static void restart_all_lwps()
342: {
343: int lwp_fd;
344: register int i;
345: GC_bool changed;
346: lwpid_t me = _lwp_self();
347: # define PARANOID
348:
349: for (i = 0; GC_current_ids[i] != 0; i++) {
350: # ifdef PARANOID
351: if (GC_current_ids[i] != me) {
352: int lwp_fd = open_lwp(GC_current_ids[i]);
353: prstatus_t status;
354:
355: if (lwp_fd < 0) ABORT("open_lwp failed");
356: if (syscall(SYS_ioctl, lwp_fd,
357: PIOCSTATUS, &status) < 0) {
358: ABORT("PIOCSTATUS failed in restart_all_lwps");
359: }
360: if (memcmp(status.pr_reg, GC_lwp_registers[i],
361: sizeof (prgregset_t)) != 0) {
362: int j;
363:
364: for(j = 0; j < NGREG; j++)
365: {
366: GC_printf3("%i: %x -> %x\n", j,
367: GC_lwp_registers[i][j],
368: status.pr_reg[j]);
369: }
370: ABORT("Register contents changed");
371: }
372: if (!(status.pr_flags & PR_STOPPED)) {
373: ABORT("lwp no longer stopped");
374: }
375: #ifdef SPARC
376: {
377: gwindows_t windows;
378: if (syscall(SYS_ioctl, lwp_fd,
379: PIOCGWIN, &windows) < 0) {
380: ABORT("PIOCGWIN failed in restart_all_lwps");
381: }
382: if (windows.wbcnt > 0) ABORT("unsaved register windows");
383: }
384: #endif
385: }
386: # endif /* PARANOID */
387: if (GC_current_ids[i] == me) continue;
388: if (_lwp_continue(GC_current_ids[i]) < 0) {
389: ABORT("Failed to restart lwp");
390: }
391: }
392: if (i >= max_lwps) ABORT("Too many lwps");
393: }
394:
395: GC_bool GC_multithreaded = 0;
396:
397: void GC_stop_world()
398: {
399: preempt_off();
400: if (GC_multithreaded)
401: stop_all_lwps();
402: }
403:
404: void GC_start_world()
405: {
406: if (GC_multithreaded)
407: restart_all_lwps();
408: preempt_on();
409: }
410:
411: void GC_thr_init(void);
412:
413: GC_bool GC_thr_initialized = FALSE;
414:
415: size_t GC_min_stack_sz;
416:
417: size_t GC_page_sz;
418:
419: /*
420: * stack_head is stored at the top of free stacks
421: */
422: struct stack_head {
423: struct stack_head *next;
424: ptr_t base;
425: thread_t owner;
426: };
427:
428: # define N_FREE_LISTS 25
429: struct stack_head *GC_stack_free_lists[N_FREE_LISTS] = { 0 };
430: /* GC_stack_free_lists[i] is free list for stacks of */
431: /* size GC_min_stack_sz*2**i. */
432: /* Free lists are linked through stack_head stored at top of stack. */
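/* Illustration (sizes hypothetical): if GC_min_stack_sz came out to 64K, */
/* list 0 would hold 64K stacks, list 1 128K, list 2 256K, and so on; */
/* GC_stack_alloc() below rounds each request up to the next such size. */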
433:
434: /* Return a stack of size at least *stack_size. *stack_size is */
435: /* replaced by the actual stack size. */
436: /* Caller holds allocation lock. */
437: ptr_t GC_stack_alloc(size_t * stack_size)
438: {
439: register size_t requested_sz = *stack_size;
440: register size_t search_sz = GC_min_stack_sz;
441: register int index = 0; /* = log2(search_sz/GC_min_stack_sz) */
442: register ptr_t base;
443: register struct stack_head *result;
444:
445: while (search_sz < requested_sz) {
446: search_sz *= 2;
447: index++;
448: }
449: if ((result = GC_stack_free_lists[index]) == 0
450: && (result = GC_stack_free_lists[index+1]) != 0) {
451: /* Try next size up. */
452: search_sz *= 2; index++;
453: }
454: if (result != 0) {
455: base = GC_stack_free_lists[index]->base;
456: GC_stack_free_lists[index] = GC_stack_free_lists[index]->next;
457: } else {
458: #ifdef MMAP_STACKS
459: base = (ptr_t)mmap(0, search_sz + GC_page_sz,
460: PROT_READ|PROT_WRITE, MAP_PRIVATE |MAP_NORESERVE,
461: GC_zfd, 0);
462: if (base == (ptr_t)-1)
463: {
464: *stack_size = 0;
465: return NULL;
466: }
467:
468: mprotect(base, GC_page_sz, PROT_NONE);
469: /* Should this use divHBLKSZ(search_sz + GC_page_sz) ? -- cf */
470: GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
471: base += GC_page_sz;
472:
473: #else
474: base = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz);
475: if (base == NULL)
476: {
477: *stack_size = 0;
478: return NULL;
479: }
480:
481: base = (ptr_t)(((word)base + GC_page_sz) & ~(GC_page_sz - 1));
482: /* Protect hottest page to detect overflow. */
483: # ifdef SOLARIS23_MPROTECT_BUG_FIXED
484: mprotect(base, GC_page_sz, PROT_NONE);
485: # endif
486: GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
487:
488: base += GC_page_sz;
489: #endif
490: }
491: *stack_size = search_sz;
492: return(base);
493: }
494:
495: /* Caller holds allocation lock. */
496: void GC_stack_free(ptr_t stack, size_t size)
497: {
498: register int index = 0;
499: register size_t search_sz = GC_min_stack_sz;
500: register struct stack_head *head;
501:
502: #ifdef MMAP_STACKS
503: /* Zero pointers */
504: mmap(stack, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED,
505: GC_zfd, 0);
506: #endif
507: while (search_sz < size) {
508: search_sz *= 2;
509: index++;
510: }
511: if (search_sz != size) ABORT("Bad stack size");
512:
513: head = (struct stack_head *)(stack + search_sz - sizeof(struct stack_head));
514: head->next = GC_stack_free_lists[index];
515: head->base = stack;
516: GC_stack_free_lists[index] = head;
517: }
518:
519: void GC_my_stack_limits();
520:
521: /* Notify virtual dirty bit implementation that known empty parts of */
522: /* stacks do not contain useful data. */
523: /* Caller holds allocation lock. */
524: void GC_old_stacks_are_fresh()
525: {
526: /* No point in doing this for MMAP stacks - and pointers are zero'd out */
527: /* by the mmap in GC_stack_free */
528: #ifndef MMAP_STACKS
529: register int i;
530: register struct stack_head *s;
531: register ptr_t p;
532: register size_t sz;
533: register struct hblk * h;
534: int dummy;
535:
536: for (i = 0, sz= GC_min_stack_sz; i < N_FREE_LISTS;
537: i++, sz *= 2) {
538: for (s = GC_stack_free_lists[i]; s != 0; s = s->next) {
539: p = s->base;
540: h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
541: if ((ptr_t)h == p) {
542: GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
543: } else {
544: GC_is_fresh((struct hblk *)p, divHBLKSZ(sz) - 1);
545: BZERO(p, (ptr_t)h - p);
546: }
547: }
548: }
549: #endif /* MMAP_STACKS */
550: GC_my_stack_limits();
551: }
552:
553: /* The set of all known threads. We intercept thread creation and */
554: /* joins. We never actually create detached threads. We allocate all */
555: /* new thread stacks ourselves. These allow us to maintain this */
556: /* data structure. */
557:
558: # define THREAD_TABLE_SZ 128 /* Must be power of 2 */
559: volatile GC_thread GC_threads[THREAD_TABLE_SZ];
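/* Each entry is the head of a linked list of GC_Thread_Rep structures */
/* whose ids hash to that bucket (thread id modulo THREAD_TABLE_SZ); see */
/* GC_new_thread() and GC_lookup_thread() below. */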
560:
561: /* Add a thread to GC_threads. We assume it wasn't already there. */
562: /* Caller holds allocation lock. */
563: GC_thread GC_new_thread(thread_t id)
564: {
565: int hv = ((word)id) % THREAD_TABLE_SZ;
566: GC_thread result;
567: static struct GC_Thread_Rep first_thread;
568: static GC_bool first_thread_used = FALSE;
569:
570: if (!first_thread_used) {
571: result = &first_thread;
572: first_thread_used = TRUE;
573: /* Don't acquire allocation lock, since we may already hold it. */
574: } else {
575: result = (struct GC_Thread_Rep *)
576: GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
577: }
578: if (result == 0) return(0);
579: result -> id = id;
580: result -> next = GC_threads[hv];
581: GC_threads[hv] = result;
582: /* result -> finished = 0; */
583: (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
584: return(result);
585: }
586:
587: /* Delete a thread from GC_threads. We assume it is there. */
588: /* (The code intentionally traps if it wasn't.) */
589: /* Caller holds allocation lock. */
590: void GC_delete_thread(thread_t id)
591: {
592: int hv = ((word)id) % THREAD_TABLE_SZ;
593: register GC_thread p = GC_threads[hv];
594: register GC_thread prev = 0;
595:
596: while (p -> id != id) {
597: prev = p;
598: p = p -> next;
599: }
600: if (prev == 0) {
601: GC_threads[hv] = p -> next;
602: } else {
603: prev -> next = p -> next;
604: }
605: }
606:
607: /* Return the GC_thread corresponding to a given thread_t. */
608: /* Returns 0 if it's not there. */
609: /* Caller holds allocation lock. */
610: GC_thread GC_lookup_thread(thread_t id)
611: {
612: int hv = ((word)id) % THREAD_TABLE_SZ;
613: register GC_thread p = GC_threads[hv];
614:
615: while (p != 0 && p -> id != id) p = p -> next;
616: return(p);
617: }
618:
1.1.1.2 maekawa 619: # define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
620:
621: word GC_get_orig_stack_size() {
622: struct rlimit rl;
623: static int warned = 0;
624: int result;
625:
626: if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
627: result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
628: if (result > MAX_ORIG_STACK_SIZE) {
629: if (!warned) {
630: WARN("Large stack limit(%ld): only scanning 8 MB", result);
631: warned = 1;
632: }
633: result = MAX_ORIG_STACK_SIZE;
634: }
635: return result;
636: }
637:
1.1 maekawa 638: /* Notify dirty bit implementation of unused parts of my stack. */
639: /* Caller holds allocation lock. */
640: void GC_my_stack_limits()
641: {
642: int dummy;
643: register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
644: register GC_thread me = GC_lookup_thread(thr_self());
645: register size_t stack_size = me -> stack_size;
646: register ptr_t stack;
647:
648: if (stack_size == 0) {
649: /* original thread */
650: /* Empirically, what should be the stack page with lowest */
651: /* address is actually inaccessible. */
1.1.1.2 maekawa 652: stack_size = GC_get_orig_stack_size() - GC_page_sz;
1.1 maekawa 653: stack = GC_stackbottom - stack_size + GC_page_sz;
654: } else {
655: stack = me -> stack;
656: }
657: if (stack > hottest || stack + stack_size < hottest) {
658: ABORT("sp out of bounds");
659: }
660: GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
661: }
662:
663:
1.1.1.3 ! maekawa 664: /* We hold allocation lock. Should do exactly the right thing if the */
! 665: /* world is stopped. Should not fail if it isn't. */
1.1 maekawa 666: void GC_push_all_stacks()
667: {
668: register int i;
669: register GC_thread p;
670: register ptr_t sp = GC_approx_sp();
671: register ptr_t bottom, top;
672: struct rlimit rl;
673:
674: # define PUSH(bottom,top) \
675: if (GC_dirty_maintained) { \
676: GC_push_dirty((bottom), (top), GC_page_was_ever_dirty, \
677: GC_push_all_stack); \
678: } else { \
679: GC_push_all_stack((bottom), (top)); \
680: }
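/* PUSH(bottom,top) scans only the pages that the virtual dirty bit */
/* implementation says may ever have been written when that mechanism is */
/* active, and falls back to scanning the whole range otherwise. */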
681: GC_push_all_stack((ptr_t)GC_lwp_registers,
682: (ptr_t)GC_lwp_registers
683: + max_lwps * sizeof(GC_lwp_registers[0]));
684: for (i = 0; i < THREAD_TABLE_SZ; i++) {
685: for (p = GC_threads[i]; p != 0; p = p -> next) {
686: if (p -> stack_size != 0) {
687: bottom = p -> stack;
688: top = p -> stack + p -> stack_size;
689: } else {
690: /* The original stack. */
1.1.1.2 maekawa 691: bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_sz;
1.1 maekawa 692: top = GC_stackbottom;
693: }
694: if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
695: PUSH(bottom, top);
696: }
697: }
698: }
699:
700:
701: int GC_is_thread_stack(ptr_t addr)
702: {
703: register int i;
704: register GC_thread p;
705: register ptr_t bottom, top;
706: struct rlimit rl;
707:
708: for (i = 0; i < THREAD_TABLE_SZ; i++) {
709: for (p = GC_threads[i]; p != 0; p = p -> next) {
710: if (p -> stack_size != 0) {
711: if (p -> stack <= addr &&
712: addr < p -> stack + p -> stack_size)
713: return 1;
714: }
715: }
716: }
 return 0; /* Not on any registered thread stack. */
717: }
718:
719: /* The only thread that ever really performs a thr_join. */
720: void * GC_thr_daemon(void * dummy)
721: {
722: void *status;
723: thread_t departed;
724: register GC_thread t;
725: register int i;
726: register int result;
727:
728: for(;;) {
729: start:
730: result = thr_join((thread_t)0, &departed, &status);
731: LOCK();
732: if (result != 0) {
733: /* No more threads; wait for create. */
734: for (i = 0; i < THREAD_TABLE_SZ; i++) {
735: for (t = GC_threads[i]; t != 0; t = t -> next) {
736: if (!(t -> flags & (DETACHED | FINISHED))) {
737: UNLOCK();
738: goto start; /* Thread started just before we */
739: /* acquired the lock. */
740: }
741: }
742: }
743: cond_wait(&GC_create_cv, &GC_allocate_ml);
744: UNLOCK();
745: } else {
746: t = GC_lookup_thread(departed);
747: GC_multithreaded--;
748: if (!(t -> flags & CLIENT_OWNS_STACK)) {
749: GC_stack_free(t -> stack, t -> stack_size);
750: }
751: if (t -> flags & DETACHED) {
752: GC_delete_thread(departed);
753: } else {
754: t -> status = status;
755: t -> flags |= FINISHED;
756: cond_signal(&(t -> join_cv));
757: cond_broadcast(&GC_prom_join_cv);
758: }
759: UNLOCK();
760: }
761: }
762: }
763:
764: /* We hold the allocation lock, or caller ensures that 2 instances */
765: /* cannot be invoked concurrently. */
766: void GC_thr_init(void)
767: {
768: GC_thread t;
769: thread_t tid;
770:
771: if (GC_thr_initialized)
772: return;
773: GC_thr_initialized = TRUE;
774: GC_min_stack_sz = ((thr_min_stack() + 32*1024 + HBLKSIZE-1)
775: & ~(HBLKSIZE - 1));
776: GC_page_sz = sysconf(_SC_PAGESIZE);
777: #ifdef MMAP_STACKS
778: GC_zfd = open("/dev/zero", O_RDONLY);
779: if (GC_zfd == -1)
780: ABORT("Can't open /dev/zero");
781: #endif /* MMAP_STACKS */
782: cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
783: cond_init(&GC_create_cv, USYNC_THREAD, 0);
784: /* Add the initial thread, so we can stop it. */
785: t = GC_new_thread(thr_self());
786: t -> stack_size = 0;
787: t -> flags = DETACHED | CLIENT_OWNS_STACK;
788: if (thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
789: 0 /* arg */, THR_DETACHED | THR_DAEMON,
790: &tid /* thread_id */) != 0) {
791: ABORT("Can't fork daemon");
792: }
793: thr_setprio(tid, 126);
794: }
795:
796: /* We acquire the allocation lock to prevent races with */
797: /* stopping/starting world. */
798: /* This is no more correct than the underlying Solaris 2.X */
799: /* implementation. Under 2.3 THIS IS BROKEN. */
800: int GC_thr_suspend(thread_t target_thread)
801: {
802: GC_thread t;
803: int result;
804:
805: LOCK();
806: result = thr_suspend(target_thread);
807: if (result == 0) {
808: t = GC_lookup_thread(target_thread);
809: if (t == 0) ABORT("thread unknown to GC");
810: t -> flags |= SUSPENDED;
811: }
812: UNLOCK();
813: return(result);
814: }
815:
816: int GC_thr_continue(thread_t target_thread)
817: {
818: GC_thread t;
819: int result;
820:
821: LOCK();
822: result = thr_continue(target_thread);
823: if (result == 0) {
824: t = GC_lookup_thread(target_thread);
825: if (t == 0) ABORT("thread unknown to GC");
826: t -> flags &= ~SUSPENDED;
827: }
828: UNLOCK();
829: return(result);
830: }
831:
832: int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
833: {
834: register GC_thread t;
835: int result = 0;
836:
837: LOCK();
838: if (wait_for == 0) {
839: register int i;
840: register GC_bool thread_exists;
841:
842: for (;;) {
843: thread_exists = FALSE;
844: for (i = 0; i < THREAD_TABLE_SZ; i++) {
845: for (t = GC_threads[i]; t != 0; t = t -> next) {
846: if (!(t -> flags & DETACHED)) {
847: if (t -> flags & FINISHED) {
848: goto found;
849: }
850: thread_exists = TRUE;
851: }
852: }
853: }
854: if (!thread_exists) {
855: result = ESRCH;
856: goto out;
857: }
858: cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
859: }
860: } else {
861: t = GC_lookup_thread(wait_for);
862: if (t == 0 || t -> flags & DETACHED) {
863: result = ESRCH;
864: goto out;
865: }
866: if (wait_for == thr_self()) {
867: result = EDEADLK;
868: goto out;
869: }
870: while (!(t -> flags & FINISHED)) {
871: cond_wait(&(t -> join_cv), &GC_allocate_ml);
872: }
873:
874: }
875: found:
876: if (status) *status = t -> status;
877: if (departed) *departed = t -> id;
878: cond_destroy(&(t -> join_cv));
879: GC_delete_thread(t -> id);
880: out:
881: UNLOCK();
882: return(result);
883: }
884:
885:
886: int
887: GC_thr_create(void *stack_base, size_t stack_size,
888: void *(*start_routine)(void *), void *arg, long flags,
889: thread_t *new_thread)
890: {
891: int result;
892: GC_thread t;
893: thread_t my_new_thread;
894: word my_flags = 0;
895: void * stack = stack_base;
896:
897: LOCK();
898: if (!GC_thr_initialized)
899: {
900: GC_thr_init();
901: }
902: GC_multithreaded++;
903: if (stack == 0) {
1.1.1.3 ! maekawa 904: if (stack_size == 0) stack_size = 1024*1024;
1.1 maekawa 905: stack = (void *)GC_stack_alloc(&stack_size);
906: if (stack == 0) {
907: GC_multithreaded--;
908: UNLOCK();
909: return(ENOMEM);
910: }
911: } else {
912: my_flags |= CLIENT_OWNS_STACK;
913: }
914: if (flags & THR_DETACHED) my_flags |= DETACHED;
915: if (flags & THR_SUSPENDED) my_flags |= SUSPENDED;
916: result = thr_create(stack, stack_size, start_routine,
917: arg, flags & ~THR_DETACHED, &my_new_thread);
918: if (result == 0) {
919: t = GC_new_thread(my_new_thread);
920: t -> flags = my_flags;
921: if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
922: t -> stack = stack;
923: t -> stack_size = stack_size;
924: if (new_thread != 0) *new_thread = my_new_thread;
925: cond_signal(&GC_create_cv);
926: } else {
927: GC_multithreaded--;
928: if (!(my_flags & CLIENT_OWNS_STACK)) {
929: GC_stack_free(stack, stack_size);
930: }
931: }
932: UNLOCK();
933: return(result);
934: }
935:
936: # else /* SOLARIS_THREADS */
937:
938: #ifndef LINT
939: int GC_no_sunOS_threads;
940: #endif
941: #endif