OpenXM_contrib2/asir2000/gc5.3/irix_threads.c, Revision 1.1.1.1
/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for Irix (>=6.2) Pthreads.  This relies on properties
 * not guaranteed by the Pthread standard.  It may or may not be portable
 * to other implementations.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there too.
 */

# if defined(IRIX_THREADS)

# include "gc_priv.h"
# include <pthread.h>
# include <semaphore.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>

/* gc.h redirects these calls to the GC_ wrappers defined below; undo  */
/* the redirection here so that this file can call the real pthread    */
/* library routines.                                                   */
#undef pthread_create
#undef pthread_sigmask
#undef pthread_join

void GC_thr_init();

#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
#endif

/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and      */
/* joins.  We never actually create detached threads.  We allocate all  */
/* new thread stacks ourselves.  These allow us to maintain this        */
/* data structure.                                                      */
/* Protected by the allocation lock.                                    */
/* Some of this should be declared volatile, but that's inconsistent    */
/* with some library routine declarations.                              */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads      */
                                  /* with a given pthread id come         */
                                  /* first.  (All but the first are       */
                                  /* guaranteed to be dead, but we may    */
                                  /* not yet have registered the join.)   */
    pthread_t id;
    word stop;
#       define NOT_STOPPED 0
#       define PLEASE_STOP 1
#       define STOPPED 2
    word flags;
#       define FINISHED 1       /* Thread has exited.                     */
#       define DETACHED 2       /* Thread is intended to be detached.     */
#       define CLIENT_OWNS_STACK 4
                                /* Stack was supplied by client.          */
    ptr_t stack;
    ptr_t stack_ptr;            /* Valid only when stopped.               */
                                /* But must be within stack region at     */
                                /* all times.                             */
    size_t stack_size;          /* 0 for original thread.                 */
    void * status;              /* Used only to avoid premature           */
                                /* reclamation of any data it might       */
                                /* reference.                             */
} * GC_thread;
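
/* The stop field implements a three-state handshake with the collector:  */
/* GC_stop_world (holding the allocation lock) moves a thread from        */
/* NOT_STOPPED to PLEASE_STOP and sends SIG_SUSPEND; GC_suspend_handler   */
/* moves it to STOPPED and blocks on GC_continue_cv; GC_start_world       */
/* resets it to NOT_STOPPED and broadcasts GC_continue_cv.                */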

GC_thread GC_lookup_thread(pthread_t id);

/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  Unfortunately, this means we have to reserve
 * a signal, and intercept client calls to change the signal mask.
 */
# define SIG_SUSPEND (SIGRTMIN + 6)

pthread_mutex_t GC_suspend_lock = PTHREAD_MUTEX_INITIALIZER;
                                /* Protects the suspend/continue          */
                                /* handshake below.                       */
pthread_cond_t GC_suspend_ack_cv = PTHREAD_COND_INITIALIZER;
pthread_cond_t GC_continue_cv = PTHREAD_COND_INITIALIZER;

void GC_suspend_handler(int sig)
{
    int dummy;
    GC_thread me;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
    me = GC_lookup_thread(pthread_self());
    /* The lookup here is safe, since I'm doing this on behalf   */
    /* of a thread which holds the allocation lock in order      */
    /* to stop the world.  Thus concurrent modification of the   */
    /* data structure is impossible.                             */
    if (PLEASE_STOP != me -> stop) {
        /* Misdirected signal.  We don't hold GC_suspend_lock    */
        /* here, so there is nothing to unlock; just return.     */
        return;
    }
    pthread_mutex_lock(&GC_suspend_lock);
    me -> stack_ptr = (ptr_t)(&dummy);
    me -> stop = STOPPED;
    pthread_cond_signal(&GC_suspend_ack_cv);
    pthread_cond_wait(&GC_continue_cv, &GC_suspend_lock);
    pthread_mutex_unlock(&GC_suspend_lock);
    /* GC_printf1("Continuing 0x%x\n", pthread_self()); */
}
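
/* Handshake timeline (collector on the left, each mutator on the right;  */
/* the collector holds the allocation lock throughout):                   */
/*                                                                         */
/*   stop = PLEASE_STOP                                                    */
/*   pthread_kill(SIG_SUSPEND)   -->  handler: stack_ptr = &dummy          */
/*                                             stop = STOPPED              */
/*   timedwait(GC_suspend_ack_cv) <-- signal(GC_suspend_ack_cv)            */
/*                                    wait(GC_continue_cv)                 */
/*   ... collection ...                                                    */
/*   broadcast(GC_continue_cv)   -->  handler returns                      */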


GC_bool GC_thr_initialized = FALSE;

size_t GC_min_stack_sz;

size_t GC_page_sz;

# define N_FREE_LISTS 25
ptr_t GC_stack_free_lists[N_FREE_LISTS] = { 0 };
                /* GC_stack_free_lists[i] is free list for stacks of      */
                /* size GC_min_stack_sz*2**i.                             */
                /* Free lists are linked through first word.              */

/* Return a stack of size at least *stack_size.  *stack_size is  */
/* replaced by the actual stack size.                            */
/* Caller holds allocation lock.                                 */
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;     /* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t result;

    while (search_sz < requested_sz) {
        search_sz *= 2;
        index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
        && (result = GC_stack_free_lists[index+1]) != 0) {
        /* Try next size up. */
        search_sz *= 2; index++;
    }
    if (result != 0) {
        GC_stack_free_lists[index] = *(ptr_t *)result;
    } else {
        result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz);
        result = (ptr_t)(((word)result + GC_page_sz) & ~(GC_page_sz - 1));
        /* Protect hottest page to detect overflow. */
        /* mprotect(result, GC_page_sz, PROT_NONE); */
        result += GC_page_sz;
    }
    *stack_size = search_sz;
    return(result);
}
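
/* Example: with GC_min_stack_sz == 4096 (HBLKSIZE on a typical           */
/* configuration), a request for 20000 bytes searches free list index 3   */
/* (4096 -> 8192 -> 16384 -> 32768) and returns a 32768-byte stack,       */
/* possibly taking a 65536-byte stack from list 4 if list 3 is empty.     */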

/* Caller holds allocation lock. */
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;

    while (search_sz < size) {
        search_sz *= 2;
        index++;
    }
    if (search_sz != size) ABORT("Bad stack size");
    *(ptr_t *)stack = GC_stack_free_lists[index];
    GC_stack_free_lists[index] = stack;
}



# define THREAD_TABLE_SZ 128    /* Must be power of 2   */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
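
/* Since THREAD_TABLE_SZ is a power of 2, the hash                        */
/* ((word)id) % THREAD_TABLE_SZ used below reduces to a mask of the low   */
/* 7 bits of the pthread id; collisions are chained through the next      */
/* field, most recently added thread first.                               */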

/* Add a thread to GC_threads.  We assume it wasn't already there.  */
/* Caller holds allocation lock.                                    */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    /* result -> stop = 0; */
    return(result);
}

/* Delete a thread from GC_threads.  We assume it is there.  */
/* (The code intentionally traps if it wasn't.)              */
/* Caller holds allocation lock.                             */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* If a thread has been joined, but we have not yet         */
/* been notified, then there may be more than one thread    */
/* in the table with the same pthread id.                   */
/* This is OK, but we need a way to delete a specific one.  */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* Return a GC_thread corresponding to a given thread_t.    */
/* Returns 0 if it's not there.                             */
/* Caller holds allocation lock or otherwise inhibits       */
/* updates.                                                 */
/* If there is more than one thread with the given id we    */
/* return the most recent one.                              */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}


/* Caller holds allocation lock.  */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int result;
    struct timespec timeout;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (!pthread_equal(p -> id, my_thread)) {
            if (p -> flags & FINISHED) {
                p -> stop = STOPPED;
                continue;
            }
            p -> stop = PLEASE_STOP;
            result = pthread_kill(p -> id, SIG_SUSPEND);
            /* GC_printf1("Sent signal to 0x%x\n", p -> id); */
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    p -> stop = STOPPED;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    pthread_mutex_lock(&GC_suspend_lock);
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        while (!pthread_equal(p -> id, my_thread) && p -> stop != STOPPED) {
            clock_gettime(CLOCK_REALTIME, &timeout);
            timeout.tv_nsec += 50000000;        /* 50 msecs */
            if (timeout.tv_nsec >= 1000000000) {
                timeout.tv_nsec -= 1000000000;
                ++timeout.tv_sec;
            }
            result = pthread_cond_timedwait(&GC_suspend_ack_cv,
                                            &GC_suspend_lock,
                                            &timeout);
            if (result == ETIMEDOUT) {
                /* Signal was lost or misdirected.  Try again.  */
                /* Duplicate signals should be benign.          */
                result = pthread_kill(p -> id, SIG_SUSPEND);
            }
        }
      }
    }
    pthread_mutex_unlock(&GC_suspend_lock);
    /* GC_printf1("World stopped 0x%x\n", pthread_self()); */
}
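
/* The 50 msec retry above guards against a lost SIG_SUSPEND.  Resending  */
/* is safe: a duplicate delivered after the thread has already stopped    */
/* stays pending until its handler returns, and the rerun handler sees    */
/* stop != PLEASE_STOP and returns without touching the handshake state.  */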

/* Caller holds allocation lock.  */
void GC_start_world()
{
    GC_thread p;
    unsigned i;

    /* GC_printf0("World starting\n"); */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        p -> stop = NOT_STOPPED;
      }
    }
    pthread_mutex_lock(&GC_suspend_lock);
    /* All other threads are at pthread_cond_wait in signal handler.  */
    /* Otherwise we couldn't have acquired the lock.                  */
    pthread_mutex_unlock(&GC_suspend_lock);
    pthread_cond_broadcast(&GC_continue_cv);
}

# ifdef MMAP_STACKS
  /* The next line is deliberately not valid C: it forces a   */
  /* compile error if MMAP_STACKS is defined.                 */
  --> not really supported yet.
int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> stack_size != 0) {
            if (p -> stack <= addr &&
                addr < p -> stack + p -> stack_size)
                return 1;
        }
      }
    }
    return 0;
}
# endif

/* We hold allocation lock.  We assume the world is stopped.  */
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t lo, hi;
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
    /* GC_printf1("Pushing stacks from thread 0x%x\n", me); */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
            lo = GC_approx_sp();
        } else {
            lo = p -> stack_ptr;
        }
        if (p -> stack_size != 0) {
            hi = p -> stack + p -> stack_size;
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
        }
        GC_push_all_stack(lo, hi);
      }
    }
}


/* We hold the allocation lock.  */
void GC_thr_init()
{
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = HBLKSIZE;
    GC_page_sz = sysconf(_SC_PAGESIZE);
    (void) sigaction(SIG_SUSPEND, 0, &act);
    if (act.sa_handler != SIG_DFL)
        ABORT("Previously installed SIG_SUSPEND handler");
    /* Install handler.  */
    act.sa_handler = GC_suspend_handler;
    act.sa_flags = SA_RESTART;
    (void) sigemptyset(&act.sa_mask);
    if (0 != sigaction(SIG_SUSPEND, &act, 0))
        ABORT("Failed to install SIG_SUSPEND handler");
    /* Add the initial thread, so we can stop it.  */
    t = GC_new_thread(pthread_self());
    t -> stack_size = 0;
    t -> stack_ptr = (ptr_t)(&t);
    t -> flags = DETACHED;
}

int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(pthread_sigmask(how, set, oset));
}
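
/* Illustrative sketch, not in the original source: assuming gc.h         */
/* redirects pthread_sigmask to the wrapper above (the #undefs at the     */
/* top of this file suggest it does), a client that blocks every signal   */
/* still leaves SIG_SUSPEND deliverable:                                  */
#if 0
void client_blocks_everything()
{
    sigset_t all;

    sigfillset(&all);
    /* Really GC_pthread_sigmask; SIG_SUSPEND is removed from the set. */
    pthread_sigmask(SIG_BLOCK, &all, NULL);
}
#endif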

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    ptr_t stack;
    size_t stack_size;
    sem_t registered;           /* 1 ==> in our thread table, but   */
                                /* parent hasn't yet noticed.       */
};

void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
    UNLOCK();
}

int GC_pthread_join(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id  */
    /* can't have been recycled by pthreads.                           */
    UNLOCK();
    result = pthread_join(thread, retval);
    /* Some versions of the Irix pthreads library can erroneously  */
    /* return EINTR when the call succeeds.                        */
    if (EINTR == result) result = 0;
    LOCK();
    /* Here the pthread thread id may have been recycled. */
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return result;
}

void * GC_start_routine(void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
    /* If a GC occurs before the thread is registered, that GC will     */
    /* ignore this thread.  That's fine, since it will block trying to  */
    /* acquire the allocation lock, and won't yet hold interesting      */
    /* pointers.                                                        */
    LOCK();
    /* We register the thread here instead of in the parent, so that    */
    /* we don't need to hold the allocation lock during pthread_create. */
    /* Holding the allocation lock there would make REDIRECT_MALLOC     */
    /* impossible.  It probably still doesn't work, but we're a little  */
    /* closer ...                                                       */
    /* This unfortunately means that we have to be careful the parent   */
    /* doesn't try to do a pthread_join before we're registered.        */
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack = si -> stack;
    me -> stack_size = si -> stack_size;
    me -> stack_ptr = (ptr_t)si -> stack + si -> stack_size - sizeof(word);
    UNLOCK();
    start = si -> start_routine;
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, 0);
    result = (*start)(start_arg);
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
        /* This involves acquiring the lock, ensuring that we can't exit */
        /* while a collection that thinks we're alive is trying to stop  */
        /* us.                                                           */
    return(result);
}

int
GC_pthread_create(pthread_t *new_thread,
                  const pthread_attr_t *attr,
                  void *(*start_routine)(void *), void *arg)
{
    int result;
    void * stack;
    size_t stacksize;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        stack = 0;
        (void) pthread_attr_init(&new_attr);
    } else {
        new_attr = *attr;
        pthread_attr_getstackaddr(&new_attr, &stack);
    }
    pthread_attr_getstacksize(&new_attr, &stacksize);
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (stacksize < GC_min_stack_sz) ABORT("Stack too small");
    if (0 == stack) {
        stack = (void *)GC_stack_alloc(&stacksize);
        if (0 == stack) {
            UNLOCK();
            return(ENOMEM);
        }
        pthread_attr_setstackaddr(&new_attr, stack);
    } else {
        my_flags |= CLIENT_OWNS_STACK;
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    si -> stack = stack;
    si -> stack_size = stacksize;
    result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
    if (0 != result && !(my_flags & CLIENT_OWNS_STACK)) {
        /* Thread creation failed; reclaim the stack we allocated. */
        GC_stack_free(stack, stacksize);
    }
    UNLOCK();
    if (0 == result) {
        /* Wait until child has been added to the thread table.           */
        /* This also ensures that we hold onto si until the child is done */
        /* with it.  Thus it doesn't matter whether it is otherwise       */
        /* visible to the collector.                                      */
        /* (On failure the child never runs, never posts the semaphore,   */
        /* and waiting would hang.)                                       */
        if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
    }
    sem_destroy(&(si -> registered));
    /* pthread_attr_destroy(&new_attr); */
    return(result);
}
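
/* Illustrative usage, not from the original file: with gc.h included, a  */
/* client's pthread calls are expected to resolve to the wrappers in      */
/* this file, so its threads are registered and stopped automatically:    */
#if 0
void * worker(void * arg)
{
    return GC_malloc(64);       /* Traced allocation from a child thread. */
}

void client_example()
{
    pthread_t tid;

    pthread_create(&tid, NULL, worker, NULL);   /* Really GC_pthread_create. */
    pthread_join(tid, NULL);                    /* Really GC_pthread_join.   */
}
#endif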

GC_bool GC_collecting = 0;  /* A hint that we're in the collector and  */
                            /* holding the allocation lock for an      */
                            /* extended period.                        */

/* Reasonably fast spin locks.  Basically the same implementation  */
/* as STL alloc.h.  This isn't really the right way to do this,    */
/* but until the POSIX scheduling mess gets straightened out ...   */

unsigned long GC_allocate_lock = 0;

#define SLEEP_THRESHOLD 3

void GC_lock()
{
#   define low_spin_max 30      /* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000   /* spin cycles for multiprocessor         */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;

    if (!GC_test_and_set(&GC_allocate_lock, 1)) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE;
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock, 1)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock, 1)) {
            return;
        }
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
                /* Don't wait for more than about 60 msecs, even  */
                /* under extreme contention.                      */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
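
/* Backoff arithmetic: the first sleep (at i == SLEEP_THRESHOLD == 3)     */
/* lasts 1 << 3 = 8 nsecs; each iteration doubles it, up to the cap of    */
/* 1 << 26 nsecs, i.e. roughly 67 msecs (the "about 60 msecs" above).     */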



# else

#ifndef LINT
  /* Declare something, so that the resulting object file is nonempty  */
  /* even when Irix thread support is not configured.                  */
  int GC_no_Irix_threads;
#endif

# endif /* IRIX_THREADS */
