Annotation of OpenXM_contrib2/asir2000/gc/irix_threads.c, Revision 1.4
1.1 noro 1: /*
1.2 noro 2: * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
3: * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
4: * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
1.1 noro 5: *
6: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8: *
9: * Permission is hereby granted to use or copy this program
10: * for any purpose, provided the above notices are retained on all copies.
11: * Permission to modify the code and to distribute modified code is granted,
12: * provided the above notices are retained, and a notice that the code was
13: * modified is included with the above copyright notice.
14: */
15: /*
16: * Support code for Irix (>=6.2) Pthreads. This relies on properties
17: * not guaranteed by the Pthread standard. It may or may not be portable
18: * to other implementations.
19: *
1.2 noro 20: * This now also includes an initial attempt at thread support for
21: * HP/UX 11.
22: *
1.1 noro 23: * Note that there is a lot of code duplication between linux_threads.c
1.4 ! noro 24: * and irix_threads.c; any changes made here may need to be reflected
1.1 noro 25: * there too.
26: */
27:
1.4 ! noro 28: # if defined(GC_IRIX_THREADS)
1.1 noro 29:
1.2 noro 30: # include "private/gc_priv.h"
1.1 noro 31: # include <pthread.h>
32: # include <semaphore.h>
33: # include <time.h>
34: # include <errno.h>
35: # include <unistd.h>
36: # include <sys/mman.h>
37: # include <sys/time.h>
38:
39: #undef pthread_create
40: #undef pthread_sigmask
41: #undef pthread_join
1.2 noro 42: #undef pthread_detach
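/* The #undefs above are needed because the collector's public header	*/
/* redefines these names to the GC_pthread_* wrappers defined below;	*/
/* this file must call the real pthread library entry points.		*/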
1.1 noro 43:
1.4 ! noro 44: #ifdef HANDLE_FORK
! 45: --> Not yet supported. Try porting the code from linux_threads.c.
! 46: #endif
! 47:
1.1 noro 48: void GC_thr_init();
49:
50: #if 0
51: void GC_print_sig_mask()
52: {
53: sigset_t blocked;
54: int i;
55:
56: if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
57: ABORT("pthread_sigmask");
58: GC_printf0("Blocked: ");
59: for (i = 1; i <= MAXSIG; i++) {
60: if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
61: }
62: GC_printf0("\n");
63: }
64: #endif
65:
66: /* We use the allocation lock to protect thread-related data structures. */
67:
68: /* The set of all known threads. We intercept thread creation and */
69: /* joins. We never actually create detached threads. We allocate all */
70: /* new thread stacks ourselves. These allow us to maintain this */
71: /* data structure. */
72: /* Protected by the allocation lock.                               */
73: /* Some of this should be declared volatile, but that's inconsistent */
74: /* with some library routine declarations. */
75: typedef struct GC_Thread_Rep {
76: struct GC_Thread_Rep * next; /* More recently allocated threads */
77: /* with a given pthread id come */
78: /* first. (All but the first are */
79: /* guaranteed to be dead, but we may */
80: /* not yet have registered the join.) */
81: pthread_t id;
82: word stop;
83: # define NOT_STOPPED 0
84: # define PLEASE_STOP 1
85: # define STOPPED 2
86: word flags;
87: # define FINISHED 1 /* Thread has exited. */
88: # define DETACHED 2 /* Thread is intended to be detached. */
89: # define CLIENT_OWNS_STACK 4
90: /* Stack was supplied by client. */
91: ptr_t stack;
92: ptr_t stack_ptr; /* Valid only when stopped. */
93: /* But must be within stack region at */
94: /* all times. */
95: size_t stack_size; /* 0 for original thread. */
96: void * status; /* Used only to avoid premature */
97: /* reclamation of any data it might */
98: /* reference. */
99: } * GC_thread;
100:
101: GC_thread GC_lookup_thread(pthread_t id);
102:
103: /*
104: * The only way to suspend threads given the pthread interface is to send
105: * signals. Unfortunately, this means we have to reserve
106: * a signal, and intercept client calls to change the signal mask.
1.4 ! noro 107: * We use SIG_SUSPEND, defined in gc_priv.h.
1.1 noro 108: */
109:
110: pthread_mutex_t GC_suspend_lock = PTHREAD_MUTEX_INITIALIZER;
111: /* Protects the suspend/resume handshake below.	*/
112: pthread_cond_t GC_suspend_ack_cv = PTHREAD_COND_INITIALIZER;
113: pthread_cond_t GC_continue_cv = PTHREAD_COND_INITIALIZER;
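/* Stop/restart handshake (summary of the code below): GC_stop_world()	*/
/* sets p -> stop to PLEASE_STOP and sends SIG_SUSPEND to each thread.	*/
/* GC_suspend_handler() records the thread's stack pointer, sets stop	*/
/* to STOPPED, signals GC_suspend_ack_cv, and waits on GC_continue_cv.	*/
/* GC_start_world() resets the flags and broadcasts GC_continue_cv.	*/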
114:
115: void GC_suspend_handler(int sig)
116: {
117: int dummy;
118: GC_thread me;
119: sigset_t all_sigs;
120: sigset_t old_sigs;
121: int i;
122:
123: if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
124: me = GC_lookup_thread(pthread_self());
125: /* The lookup here is safe, since I'm doing this on behalf */
126: /* of a thread which holds the allocation lock in order */
127: /* to stop the world. Thus concurrent modification of the */
128: /* data structure is impossible. */
129: if (PLEASE_STOP != me -> stop) {
130: /* Misdirected signal. */
131: pthread_mutex_unlock(&GC_suspend_lock);
132: return;
133: }
134: pthread_mutex_lock(&GC_suspend_lock);
135: me -> stack_ptr = (ptr_t)(&dummy);
136: me -> stop = STOPPED;
137: pthread_cond_signal(&GC_suspend_ack_cv);
138: pthread_cond_wait(&GC_continue_cv, &GC_suspend_lock);
139: pthread_mutex_unlock(&GC_suspend_lock);
140: /* GC_printf1("Continuing 0x%x\n", pthread_self()); */
141: }
142:
143:
144: GC_bool GC_thr_initialized = FALSE;
145:
146: size_t GC_min_stack_sz;
147:
148: # define N_FREE_LISTS 25
149: ptr_t GC_stack_free_lists[N_FREE_LISTS] = { 0 };
150: /* GC_stack_free_lists[i] is free list for stacks of */
151: /* size GC_min_stack_sz*2**i. */
152: /* Free lists are linked through first word. */
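/* For example, a request for 3*GC_min_stack_sz bytes is rounded up to	*/
/* 4*GC_min_stack_sz and is served from (and later returned to)	*/
/* GC_stack_free_lists[2].						*/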
153:
154: /* Return a stack of size at least *stack_size. *stack_size is */
155: /* replaced by the actual stack size. */
156: /* Caller holds allocation lock. */
157: ptr_t GC_stack_alloc(size_t * stack_size)
158: {
159: register size_t requested_sz = *stack_size;
160: register size_t search_sz = GC_min_stack_sz;
161: register int index = 0; /* = log2(search_sz/GC_min_stack_sz) */
162: register ptr_t result;
163:
164: while (search_sz < requested_sz) {
165: search_sz *= 2;
166: index++;
167: }
168: if ((result = GC_stack_free_lists[index]) == 0
169: && (result = GC_stack_free_lists[index+1]) != 0) {
170: /* Try next size up. */
171: search_sz *= 2; index++;
172: }
173: if (result != 0) {
174: GC_stack_free_lists[index] = *(ptr_t *)result;
175: } else {
1.4 ! noro 176: result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_size);
! 177: result = (ptr_t)(((word)result + GC_page_size) & ~(GC_page_size - 1));
1.1 noro 178: /* Protect hottest page to detect overflow. */
1.2 noro 179: # ifdef STACK_GROWS_UP
1.4 ! noro 180: /* mprotect(result + search_sz, GC_page_size, PROT_NONE); */
1.2 noro 181: # else
1.4 ! noro 182: /* mprotect(result, GC_page_size, PROT_NONE); */
! 183: result += GC_page_size;
1.2 noro 184: # endif
1.1 noro 185: }
186: *stack_size = search_sz;
187: return(result);
188: }
189:
190: /* Caller holds allocation lock. */
191: void GC_stack_free(ptr_t stack, size_t size)
192: {
193: register int index = 0;
194: register size_t search_sz = GC_min_stack_sz;
195:
196: while (search_sz < size) {
197: search_sz *= 2;
198: index++;
199: }
200: if (search_sz != size) ABORT("Bad stack size");
201: *(ptr_t *)stack = GC_stack_free_lists[index];
202: GC_stack_free_lists[index] = stack;
203: }
204:
205:
206:
207: # define THREAD_TABLE_SZ 128 /* Must be power of 2 */
208: volatile GC_thread GC_threads[THREAD_TABLE_SZ];
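/* Hash table of known threads, keyed by ((word)id) % THREAD_TABLE_SZ;	*/
/* collisions are chained through the next field.			*/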
209:
1.2 noro 210: void GC_push_thread_structures GC_PROTO((void))
211: {
212: GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
213: }
214:
1.1 noro 215: /* Add a thread to GC_threads. We assume it wasn't already there. */
216: /* Caller holds allocation lock. */
217: GC_thread GC_new_thread(pthread_t id)
218: {
219: int hv = ((word)id) % THREAD_TABLE_SZ;
220: GC_thread result;
221: static struct GC_Thread_Rep first_thread;
222: static GC_bool first_thread_used = FALSE;
223:
224: if (!first_thread_used) {
225: result = &first_thread;
226: first_thread_used = TRUE;
227: /* Don't acquire the allocation lock, since we may already hold it. */
228: } else {
229: result = (struct GC_Thread_Rep *)
1.2 noro 230: GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
1.1 noro 231: }
232: if (result == 0) return(0);
233: result -> id = id;
234: result -> next = GC_threads[hv];
235: GC_threads[hv] = result;
236: /* result -> flags = 0; */
237: /* result -> stop = 0; */
238: return(result);
239: }
240:
241: /* Delete a thread from GC_threads. We assume it is there. */
242: /* (The code intentionally traps if it wasn't.) */
243: /* Caller holds allocation lock. */
244: void GC_delete_thread(pthread_t id)
245: {
246: int hv = ((word)id) % THREAD_TABLE_SZ;
247: register GC_thread p = GC_threads[hv];
248: register GC_thread prev = 0;
249:
250: while (!pthread_equal(p -> id, id)) {
251: prev = p;
252: p = p -> next;
253: }
254: if (prev == 0) {
255: GC_threads[hv] = p -> next;
256: } else {
257: prev -> next = p -> next;
258: }
259: }
260:
261: /* If a thread has been joined, but we have not yet */
262: /* been notified, then there may be more than one thread */
263: /* in the table with the same pthread id. */
264: /* This is OK, but we need a way to delete a specific one. */
265: void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
266: {
267: int hv = ((word)id) % THREAD_TABLE_SZ;
268: register GC_thread p = GC_threads[hv];
269: register GC_thread prev = 0;
270:
271: while (p != gc_id) {
272: prev = p;
273: p = p -> next;
274: }
275: if (prev == 0) {
276: GC_threads[hv] = p -> next;
277: } else {
278: prev -> next = p -> next;
279: }
280: }
281:
282: /* Return a GC_thread corresponding to a given thread_t. */
283: /* Returns 0 if it's not there. */
284: /* Caller holds allocation lock or otherwise inhibits */
285: /* updates. */
286: /* If there is more than one thread with the given id we */
287: /* return the most recent one. */
288: GC_thread GC_lookup_thread(pthread_t id)
289: {
290: int hv = ((word)id) % THREAD_TABLE_SZ;
291: register GC_thread p = GC_threads[hv];
292:
293: while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
294: return(p);
295: }
296:
297:
298: /* Caller holds allocation lock. */
299: void GC_stop_world()
300: {
301: pthread_t my_thread = pthread_self();
302: register int i;
303: register GC_thread p;
304: register int result;
305: struct timespec timeout;
306:
307: for (i = 0; i < THREAD_TABLE_SZ; i++) {
308: for (p = GC_threads[i]; p != 0; p = p -> next) {
309: if (p -> id != my_thread) {
310: if (p -> flags & FINISHED) {
311: p -> stop = STOPPED;
312: continue;
313: }
314: p -> stop = PLEASE_STOP;
315: result = pthread_kill(p -> id, SIG_SUSPEND);
316: /* GC_printf1("Sent signal to 0x%x\n", p -> id); */
317: switch(result) {
318: case ESRCH:
319: /* Not really there anymore. Possible? */
320: p -> stop = STOPPED;
321: break;
322: case 0:
323: break;
324: default:
325: ABORT("pthread_kill failed");
326: }
327: }
328: }
329: }
330: pthread_mutex_lock(&GC_suspend_lock);
331: for (i = 0; i < THREAD_TABLE_SZ; i++) {
332: for (p = GC_threads[i]; p != 0; p = p -> next) {
333: while (p -> id != my_thread && p -> stop != STOPPED) {
334: clock_gettime(CLOCK_REALTIME, &timeout);
335: timeout.tv_nsec += 50000000; /* 50 msecs */
336: if (timeout.tv_nsec >= 1000000000) {
337: timeout.tv_nsec -= 1000000000;
338: ++timeout.tv_sec;
339: }
340: result = pthread_cond_timedwait(&GC_suspend_ack_cv,
341: &GC_suspend_lock,
342: &timeout);
343: if (result == ETIMEDOUT) {
344: /* Signal was lost or misdirected. Try again. */
345: /* Duplicate signals should be benign. */
346: result = pthread_kill(p -> id, SIG_SUSPEND);
347: }
348: }
349: }
350: }
351: pthread_mutex_unlock(&GC_suspend_lock);
352: /* GC_printf1("World stopped 0x%x\n", pthread_self()); */
353: }
354:
355: /* Caller holds allocation lock. */
356: void GC_start_world()
357: {
358: GC_thread p;
359: unsigned i;
360:
361: /* GC_printf0("World starting\n"); */
362: for (i = 0; i < THREAD_TABLE_SZ; i++) {
363: for (p = GC_threads[i]; p != 0; p = p -> next) {
364: p -> stop = NOT_STOPPED;
365: }
366: }
367: pthread_mutex_lock(&GC_suspend_lock);
368: /* All other threads are at pthread_cond_wait in signal handler. */
369: /* Otherwise we couldn't have acquired the lock. */
370: pthread_mutex_unlock(&GC_suspend_lock);
371: pthread_cond_broadcast(&GC_continue_cv);
372: }
373:
374: # ifdef MMAP_STACKS
375: --> not really supported yet.
376: int GC_is_thread_stack(ptr_t addr)
377: {
378: register int i;
379: register GC_thread p;
380:
381: for (i = 0; i < THREAD_TABLE_SZ; i++) {
382: for (p = GC_threads[i]; p != 0; p = p -> next) {
383: if (p -> stack_size != 0) {
384: if (p -> stack <= addr &&
385: addr < p -> stack + p -> stack_size)
386: return 1;
387: }
388: }
389: }
390: return 0;
391: }
392: # endif
393:
1.2 noro 394: /* We hold allocation lock. Should do exactly the right thing if the */
395: /* world is stopped. Should not fail if it isn't. */
1.1 noro 396: void GC_push_all_stacks()
397: {
398: register int i;
399: register GC_thread p;
400: register ptr_t sp = GC_approx_sp();
1.2 noro 401: register ptr_t hot, cold;
1.1 noro 402: pthread_t me = pthread_self();
403:
404: if (!GC_thr_initialized) GC_thr_init();
405: /* GC_printf1("Pushing stacks from thread 0x%x\n", me); */
406: for (i = 0; i < THREAD_TABLE_SZ; i++) {
407: for (p = GC_threads[i]; p != 0; p = p -> next) {
408: if (p -> flags & FINISHED) continue;
409: if (pthread_equal(p -> id, me)) {
1.2 noro 410: hot = GC_approx_sp();
1.1 noro 411: } else {
1.2 noro 412: hot = p -> stack_ptr;
1.1 noro 413: }
414: if (p -> stack_size != 0) {
1.2 noro 415: # ifdef STACK_GROWS_UP
416: cold = p -> stack;
417: # else
418: cold = p -> stack + p -> stack_size;
419: # endif
1.1 noro 420: } else {
421: /* The original stack. */
1.2 noro 422: cold = GC_stackbottom;
1.1 noro 423: }
1.2 noro 424: # ifdef STACK_GROWS_UP
425: GC_push_all_stack(cold, hot);
426: # else
427: GC_push_all_stack(hot, cold);
428: # endif
1.1 noro 429: }
430: }
431: }
432:
433:
434: /* We hold the allocation lock. */
435: void GC_thr_init()
436: {
437: GC_thread t;
438: struct sigaction act;
439:
440: if (GC_thr_initialized) return;
441: GC_thr_initialized = TRUE;
442: GC_min_stack_sz = HBLKSIZE;
443: (void) sigaction(SIG_SUSPEND, 0, &act);
444: if (act.sa_handler != SIG_DFL)
445: ABORT("Previously installed SIG_SUSPEND handler");
446: /* Install handler. */
447: act.sa_handler = GC_suspend_handler;
448: act.sa_flags = SA_RESTART;
449: (void) sigemptyset(&act.sa_mask);
450: if (0 != sigaction(SIG_SUSPEND, &act, 0))
451: ABORT("Failed to install SIG_SUSPEND handler");
452: /* Add the initial thread, so we can stop it. */
453: t = GC_new_thread(pthread_self());
454: t -> stack_size = 0;
455: t -> stack_ptr = (ptr_t)(&t);
456: t -> flags = DETACHED;
457: }
458:
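/* Wrapper for pthread_sigmask: silently remove SIG_SUSPEND from any	*/
/* set the client tries to block, so that the collector can always	*/
/* stop this thread.							*/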
459: int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
460: {
461: sigset_t fudged_set;
462:
463: if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
464: fudged_set = *set;
465: sigdelset(&fudged_set, SIG_SUSPEND);
466: set = &fudged_set;
467: }
468: return(pthread_sigmask(how, set, oset));
469: }
470:
471: struct start_info {
472: void *(*start_routine)(void *);
473: void *arg;
474: word flags;
475: ptr_t stack;
476: size_t stack_size;
477: sem_t registered; /* 1 ==> in our thread table, but */
478: /* parent hasn't yet noticed. */
479: };
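/* The registered semaphore is the handshake between parent and child:	*/
/* GC_start_routine() posts it once the child has added itself to	*/
/* GC_threads, and GC_pthread_create() waits on it before returning,	*/
/* so the start_info block stays live until the child is done with it.	*/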
480:
481: void GC_thread_exit_proc(void *arg)
482: {
483: GC_thread me;
484:
485: LOCK();
486: me = GC_lookup_thread(pthread_self());
487: if (me -> flags & DETACHED) {
488: GC_delete_thread(pthread_self());
489: } else {
490: me -> flags |= FINISHED;
491: }
492: UNLOCK();
493: }
494:
495: int GC_pthread_join(pthread_t thread, void **retval)
496: {
497: int result;
498: GC_thread thread_gc_id;
499:
500: LOCK();
501: thread_gc_id = GC_lookup_thread(thread);
502: /* This is guaranteed to be the intended one, since the thread id */
503: /* can't have been recycled by pthreads. */
504: UNLOCK();
505: result = pthread_join(thread, retval);
506: /* Some versions of the Irix pthreads library can erroneously */
507: /* return EINTR when the call succeeds. */
508: if (EINTR == result) result = 0;
1.2 noro 509: if (result == 0) {
510: LOCK();
511: /* Here the pthread thread id may have been recycled. */
512: GC_delete_gc_thread(thread, thread_gc_id);
513: UNLOCK();
514: }
515: return result;
516: }
517:
518: int GC_pthread_detach(pthread_t thread)
519: {
520: int result;
521: GC_thread thread_gc_id;
522:
1.1 noro 523: LOCK();
1.2 noro 524: thread_gc_id = GC_lookup_thread(thread);
1.1 noro 525: UNLOCK();
1.4 ! noro 526: result = pthread_detach(thread);
1.2 noro 527: if (result == 0) {
528: LOCK();
529: thread_gc_id -> flags |= DETACHED;
530: /* Here the pthread thread id may have been recycled. */
531: if (thread_gc_id -> flags & FINISHED) {
532: GC_delete_gc_thread(thread, thread_gc_id);
533: }
534: UNLOCK();
535: }
1.1 noro 536: return result;
537: }
538:
539: void * GC_start_routine(void * arg)
540: {
541: struct start_info * si = arg;
542: void * result;
543: GC_thread me;
544: pthread_t my_pthread;
545: void *(*start)(void *);
546: void *start_arg;
547:
548: my_pthread = pthread_self();
549: /* If a GC occurs before the thread is registered, that GC will */
550: /* ignore this thread. That's fine, since it will block trying to */
551: /* acquire the allocation lock, and won't yet hold interesting */
552: /* pointers. */
553: LOCK();
554: /* We register the thread here instead of in the parent, so that */
555: /* we don't need to hold the allocation lock during pthread_create. */
556: /* Holding the allocation lock there would make REDIRECT_MALLOC */
557: /* impossible. It probably still doesn't work, but we're a little */
558: /* closer ... */
559: /* This unfortunately means that we have to be careful the parent */
560: /* doesn't try to do a pthread_join before we're registered. */
561: me = GC_new_thread(my_pthread);
562: me -> flags = si -> flags;
563: me -> stack = si -> stack;
564: me -> stack_size = si -> stack_size;
565: me -> stack_ptr = (ptr_t)si -> stack + si -> stack_size - sizeof(word);
566: UNLOCK();
567: start = si -> start_routine;
568: start_arg = si -> arg;
569: sem_post(&(si -> registered));
570: pthread_cleanup_push(GC_thread_exit_proc, 0);
571: result = (*start)(start_arg);
572: me -> status = result;
573: me -> flags |= FINISHED;
574: pthread_cleanup_pop(1);
575: /* This involves acquiring the lock, ensuring that we can't exit */
576: /* while a collection that thinks we're alive is trying to stop */
577: /* us. */
578: return(result);
579: }
580:
1.2 noro 581: # define copy_attr(pa_ptr, source) *(pa_ptr) = *(source)
582:
1.1 noro 583: int
584: GC_pthread_create(pthread_t *new_thread,
585: const pthread_attr_t *attr,
586: void *(*start_routine)(void *), void *arg)
587: {
588: int result;
589: GC_thread t;
590: void * stack;
591: size_t stacksize;
592: pthread_attr_t new_attr;
593: int detachstate;
594: word my_flags = 0;
595: struct start_info * si = GC_malloc(sizeof(struct start_info));
596: /* This is otherwise saved only in an area mmapped by the thread */
597: /* library, which isn't visible to the collector. */
598:
599: if (0 == si) return(ENOMEM);
1.2 noro 600: if (0 != sem_init(&(si -> registered), 0, 0)) {
601: ABORT("sem_init failed");
602: }
1.1 noro 603: si -> start_routine = start_routine;
604: si -> arg = arg;
605: LOCK();
1.4 ! noro 606: if (!GC_is_initialized) GC_init();
1.1 noro 607: if (NULL == attr) {
608: stack = 0;
609: (void) pthread_attr_init(&new_attr);
610: } else {
1.2 noro 611: copy_attr(&new_attr, attr);
1.1 noro 612: pthread_attr_getstackaddr(&new_attr, &stack);
613: }
614: pthread_attr_getstacksize(&new_attr, &stacksize);
615: pthread_attr_getdetachstate(&new_attr, &detachstate);
616: if (stacksize < GC_min_stack_sz) ABORT("Stack too small");
617: if (0 == stack) {
618: stack = (void *)GC_stack_alloc(&stacksize);
619: if (0 == stack) {
620: UNLOCK();
621: return(ENOMEM);
622: }
623: pthread_attr_setstackaddr(&new_attr, stack);
624: } else {
625: my_flags |= CLIENT_OWNS_STACK;
626: }
627: if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
628: si -> flags = my_flags;
629: si -> stack = stack;
630: si -> stack_size = stacksize;
631: result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
632:     if (0 != result && !(my_flags & CLIENT_OWNS_STACK)) {
633: GC_stack_free(stack, stacksize);
634: }
635: UNLOCK();
636: /* Wait until child has been added to the thread table. */
637: /* This also ensures that we hold onto si until the child is done */
638: /* with it. Thus it doesn't matter whether it is otherwise */
639: /* visible to the collector. */
1.2 noro 640: while (0 != sem_wait(&(si -> registered))) {
641: if (errno != EINTR) {
642: GC_printf1("Sem_wait: errno = %ld\n", (unsigned long) errno);
643: ABORT("sem_wait failed");
644: }
645: }
1.1 noro 646: sem_destroy(&(si -> registered));
1.2 noro 647: pthread_attr_destroy(&new_attr); /* Probably unnecessary under Irix */
1.1 noro 648: return(result);
649: }
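#if 0
/* Illustrative sketch only, not part of the original sources.  With	*/
/* GC_IRIX_THREADS defined, the collector's public header redefines	*/
/* pthread_create, pthread_join, etc. to the GC_pthread_* wrappers in	*/
/* this file (hence the #undefs near the top), so ordinary client code	*/
/* such as the following is registered with the collector		*/
/* automatically.  example_worker and example_create_and_join are	*/
/* hypothetical names.							*/
#include <pthread.h>
#include "gc.h"

static void * example_worker(void * arg)
{
    return GC_MALLOC(1024);	/* Allocation is visible to the collector. */
}

static void example_create_and_join(void)
{
    pthread_t t;
    void * status;

    /* Expands to GC_pthread_create(); since no stack address is	*/
    /* supplied, the stack comes from GC_stack_alloc().		*/
    if (0 == pthread_create(&t, 0, example_worker, 0)) {
	(void) pthread_join(t, &status);  /* Expands to GC_pthread_join(). */
    }
}
#endif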
650:
1.2 noro 651: VOLATILE GC_bool GC_collecting = 0;
652: /* A hint that we're in the collector and */
1.1 noro 653: /* holding the allocation lock for an */
654: /* extended period. */
655:
656: /* Reasonably fast spin locks. Basically the same implementation */
1.2 noro 657: /* as STL alloc.h. */
658:
659: #define SLEEP_THRESHOLD 3
1.1 noro 660:
661: unsigned long GC_allocate_lock = 0;
1.4 ! noro 662: # define GC_TRY_LOCK() !GC_test_and_set(&GC_allocate_lock)
1.2 noro 663: # define GC_LOCK_TAKEN GC_allocate_lock
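/* GC_lock() below spins for up to spin_max iterations (adapting	*/
/* spin_max to recent success), then falls back to sched_yield(), and	*/
/* finally to nanosleep() with an exponential backoff capped at	*/
/* 1<<26 ns (about 67 msecs).  If GC_collecting is set, spinning is	*/
/* skipped, since the lock will be held for an extended period.	*/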
1.1 noro 664:
665: void GC_lock()
666: {
667: # define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
668: # define high_spin_max 1000 /* spin cycles for multiprocessor */
669: static unsigned spin_max = low_spin_max;
670: unsigned my_spin_max;
671: static unsigned last_spins = 0;
672: unsigned my_last_spins;
673: volatile unsigned junk;
674: # define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
675: int i;
676:
1.2 noro 677: if (GC_TRY_LOCK()) {
1.1 noro 678: return;
679: }
680: junk = 0;
681: my_spin_max = spin_max;
682: my_last_spins = last_spins;
683: for (i = 0; i < my_spin_max; i++) {
684: if (GC_collecting) goto yield;
1.2 noro 685: if (i < my_last_spins/2 || GC_LOCK_TAKEN) {
1.1 noro 686: PAUSE;
687: continue;
688: }
1.2 noro 689: if (GC_TRY_LOCK()) {
1.1 noro 690: /*
691: * got it!
692: * Spinning worked. Thus we're probably not being scheduled
693: * against the other process with which we were contending.
694: * Thus it makes sense to spin longer the next time.
695: */
696: last_spins = i;
697: spin_max = high_spin_max;
698: return;
699: }
700: }
701: /* We are probably being scheduled against the other process. Sleep. */
702: spin_max = low_spin_max;
703: yield:
704: for (i = 0;; ++i) {
1.2 noro 705: if (GC_TRY_LOCK()) {
1.1 noro 706: return;
707: }
708: if (i < SLEEP_THRESHOLD) {
709: sched_yield();
710: } else {
711: struct timespec ts;
712:
713: if (i > 26) i = 26;
714: /* Don't wait for more than about 60msecs, even */
715: /* under extreme contention. */
716: ts.tv_sec = 0;
717: ts.tv_nsec = 1 << i;
718: nanosleep(&ts, 0);
719: }
720: }
721: }
722:
723: # else
724:
725: #ifndef LINT
726: int GC_no_Irix_threads;
727: #endif
728:
1.4 ! noro 729: # endif /* GC_IRIX_THREADS */
1.1 noro 730: