Annotation of OpenXM_contrib/gc/linux_threads.c, Revision 1.1.1.3
1.1 maekawa 1: /*
2: * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3: * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4: * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
5: *
6: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8: *
9: * Permission is hereby granted to use or copy this program
10: * for any purpose, provided the above notices are retained on all copies.
11: * Permission to modify the code and to distribute modified code is granted,
12: * provided the above notices are retained, and a notice that the code was
13: * modified is included with the above copyright notice.
14: */
15: /*
16: * Support code for LinuxThreads, the clone()-based kernel
17: * thread package for Linux which is included in libc6.
18: *
19: * This code relies on implementation details of LinuxThreads,
20: * (i.e. properties not guaranteed by the Pthread standard):
21: *
22: * - the function GC_linux_thread_top_of_stack(void)
23: * relies on the way LinuxThreads lays out thread stacks
24: * in the address space.
25: *
26: * Note that there is a lot of code duplication between linux_threads.c
27: * and irix_threads.c; any changes made here may need to be reflected
28: * there too.
29: */
30:
31: /* #define DEBUG_THREADS 1 */
32:
33: /* ANSI C requires that a compilation unit contains something */
34: # include "gc_priv.h"
35:
36: # if defined(LINUX_THREADS)
37:
38: # include <pthread.h>
1.1.1.3 ! maekawa 39: # include <sched.h>
1.1 maekawa 40: # include <time.h>
41: # include <errno.h>
42: # include <unistd.h>
43: # include <sys/mman.h>
44: # include <sys/time.h>
45: # include <semaphore.h>
1.1.1.3 ! maekawa 46: # include <signal.h>
! 47:
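/* Two ways to intercept the client's pthread calls: with USE_LD_WRAP the  */
/* wrappers are named __wrap_pthread_create etc., so the GNU linker's      */
/* --wrap option redirects client calls to them; otherwise they are named  */
/* GC_pthread_create etc., and the #undefs below cancel any macros that    */
/* redirect client calls, so that the wrappers can still reach the real    */
/* library functions.                                                      */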
! 48: #ifdef USE_LD_WRAP
! 49: # define WRAP_FUNC(f) __wrap_##f
! 50: # define REAL_FUNC(f) __real_##f
! 51: #else
! 52: # define WRAP_FUNC(f) GC_##f
! 53: # define REAL_FUNC(f) f
! 54: # undef pthread_create
! 55: # undef pthread_sigmask
! 56: # undef pthread_join
! 57: #endif
1.1 maekawa 58:
59:
60: void GC_thr_init();
61:
62: #if 0
63: void GC_print_sig_mask()
64: {
65: sigset_t blocked;
66: int i;
67:
68: if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
69: ABORT("pthread_sigmask");
70: GC_printf0("Blocked: ");
71: for (i = 1; i <= MAXSIG; i++) {
72: if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
73: }
74: GC_printf0("\n");
75: }
76: #endif
77:
78: /* We use the allocation lock to protect thread-related data structures. */
79:
80: /* The set of all known threads. We intercept thread creation and */
81: /* joins. We never actually create detached threads. We allocate all */
82: /* new thread stacks ourselves. These allow us to maintain this */
83: /* data structure. */
 84: /* Protected by the allocation lock. */
 85: /* Some of this should be declared volatile, but that's inconsistent */
86: /* with some library routine declarations. */
87: typedef struct GC_Thread_Rep {
88: struct GC_Thread_Rep * next; /* More recently allocated threads */
89: /* with a given pthread id come */
90: /* first. (All but the first are */
91: /* guaranteed to be dead, but we may */
92: /* not yet have registered the join.) */
93: pthread_t id;
94: word flags;
95: # define FINISHED 1 /* Thread has exited. */
96: # define DETACHED 2 /* Thread is intended to be detached. */
97: # define MAIN_THREAD 4 /* True for the original thread only. */
98:
1.1.1.3 ! maekawa 99: ptr_t stack_end; /* Cold end of the stack. */
! 100: ptr_t stack_ptr; /* Valid only when stopped. */
! 101: # ifdef IA64
! 102: ptr_t backing_store_end;
! 103: ptr_t backing_store_ptr;
! 104: # endif
1.1 maekawa 105: int signal;
106: void * status; /* The value returned from the thread. */
107: /* Used only to avoid premature */
108: /* reclamation of any data it might */
109: /* reference. */
110: } * GC_thread;
111:
112: GC_thread GC_lookup_thread(pthread_t id);
113:
114: /*
115: * The only way to suspend threads given the pthread interface is to send
116: * signals. We can't use SIGSTOP directly, because we need to get the
117: * thread to save its stack pointer in the GC thread table before
118: * suspending. So we have to reserve a signal of our own for this.
119: * This means we have to intercept client calls to change the signal mask.
120: * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
 121: * so we need to use something else. I chose SIGPWR.
122: * (Perhaps SIGUNUSED would be a better choice.)
123: */
124: #define SIG_SUSPEND SIGPWR
125:
126: #define SIG_RESTART SIGXCPU
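/* Signal used to tell a suspended thread to resume; see GC_restart_handler. */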
127:
128: sem_t GC_suspend_ack_sem;
129:
130: /*
131: GC_linux_thread_top_of_stack() relies on implementation details of
132: LinuxThreads, namely that thread stacks are allocated on 2M boundaries
133: and grow to no more than 2M.
134: To make sure that we're using LinuxThreads and not some other thread
1.1.1.2 maekawa 135: package, we generate a dummy reference to `pthread_kill_other_threads_np'
136: (was `__pthread_initial_thread_bos' but that disappeared),
1.1 maekawa 137: which is a symbol defined in LinuxThreads, but (hopefully) not in other
138: thread packages.
139: */
1.1.1.2 maekawa 140: void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
1.1 maekawa 141:
142: #define LINUX_THREADS_STACK_SIZE (2 * 1024 * 1024)
143:
144: static inline ptr_t GC_linux_thread_top_of_stack(void)
145: {
146: char *sp = GC_approx_sp();
147: ptr_t tos = (ptr_t) (((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
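    /* Round sp up to the next 2MB boundary.  Since LinuxThreads gives each */
    /* thread a 2MB-aligned stack region of at most 2MB (see above), this   */
    /* is the cold end (top) of the current thread's stack.                 */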
148: #if DEBUG_THREADS
149: GC_printf1("SP = %lx\n", (unsigned long)sp);
150: GC_printf1("TOS = %lx\n", (unsigned long)tos);
151: #endif
152: return tos;
153: }
154:
1.1.1.3 ! maekawa 155: #ifdef IA64
! 156: extern word GC_save_regs_in_stack();
! 157: #endif
! 158:
1.1 maekawa 159: void GC_suspend_handler(int sig)
160: {
161: int dummy;
162: pthread_t my_thread = pthread_self();
163: GC_thread me;
164: sigset_t all_sigs;
165: sigset_t old_sigs;
166: int i;
167: sigset_t mask;
168:
169: if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
170:
171: #if DEBUG_THREADS
172: GC_printf1("Suspending 0x%x\n", my_thread);
173: #endif
174:
175: me = GC_lookup_thread(my_thread);
176: /* The lookup here is safe, since I'm doing this on behalf */
177: /* of a thread which holds the allocation lock in order */
178: /* to stop the world. Thus concurrent modification of the */
179: /* data structure is impossible. */
180: me -> stack_ptr = (ptr_t)(&dummy);
1.1.1.3 ! maekawa 181: # ifdef IA64
! 182: me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
! 183: # endif
1.1 maekawa 184:
185: /* Tell the thread that wants to stop the world that this */
186: /* thread has been stopped. Note that sem_post() is */
187: /* the only async-signal-safe primitive in LinuxThreads. */
188: sem_post(&GC_suspend_ack_sem);
189:
190: /* Wait until that thread tells us to restart by sending */
191: /* this thread a SIG_RESTART signal. */
192: /* SIG_RESTART should be masked at this point. Thus there */
193: /* is no race. */
194: if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
195: if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
1.1.1.3 ! maekawa 196: # ifdef NO_SIGNALS
! 197: if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
! 198: if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
! 199: if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");
! 200: # endif
1.1 maekawa 201: do {
202: me->signal = 0;
203: sigsuspend(&mask); /* Wait for signal */
204: } while (me->signal != SIG_RESTART);
205:
206: #if DEBUG_THREADS
207: GC_printf1("Continuing 0x%x\n", my_thread);
208: #endif
209: }
210:
211: void GC_restart_handler(int sig)
212: {
213: GC_thread me;
214:
 215: if (sig != SIG_RESTART) ABORT("Bad signal in restart_handler");
216:
217: /* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
218: /* The lookup here is safe, since I'm doing this on behalf */
219: /* of a thread which holds the allocation lock in order */
220: /* to stop the world. Thus concurrent modification of the */
221: /* data structure is impossible. */
222: me = GC_lookup_thread(pthread_self());
223: me->signal = SIG_RESTART;
224:
225: /*
226: ** Note: even if we didn't do anything useful here,
227: ** it would still be necessary to have a signal handler,
228: ** rather than ignoring the signals, otherwise
229: ** the signals will not be delivered at all, and
230: ** will thus not interrupt the sigsuspend() above.
231: */
232:
233: #if DEBUG_THREADS
234: GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
235: #endif
236: }
237:
238: GC_bool GC_thr_initialized = FALSE;
239:
240: # define THREAD_TABLE_SZ 128 /* Must be power of 2 */
241: volatile GC_thread GC_threads[THREAD_TABLE_SZ];
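/* Hash table of thread descriptors: a thread is bucketed by its pthread id */
/* modulo THREAD_TABLE_SZ, and buckets are chained through the next field   */
/* (see GC_new_thread and GC_lookup_thread).                                 */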
242:
243: /* Add a thread to GC_threads. We assume it wasn't already there. */
244: /* Caller holds allocation lock. */
245: GC_thread GC_new_thread(pthread_t id)
246: {
247: int hv = ((word)id) % THREAD_TABLE_SZ;
248: GC_thread result;
249: static struct GC_Thread_Rep first_thread;
250: static GC_bool first_thread_used = FALSE;
251:
252: if (!first_thread_used) {
253: result = &first_thread;
254: first_thread_used = TRUE;
 255: /* Don't acquire allocation lock, since we may already hold it. */
256: } else {
257: result = (struct GC_Thread_Rep *)
258: GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
259: }
260: if (result == 0) return(0);
261: result -> id = id;
262: result -> next = GC_threads[hv];
263: GC_threads[hv] = result;
264: /* result -> flags = 0; */
265: return(result);
266: }
267:
268: /* Delete a thread from GC_threads. We assume it is there. */
269: /* (The code intentionally traps if it wasn't.) */
270: /* Caller holds allocation lock. */
271: void GC_delete_thread(pthread_t id)
272: {
273: int hv = ((word)id) % THREAD_TABLE_SZ;
274: register GC_thread p = GC_threads[hv];
275: register GC_thread prev = 0;
276:
277: while (!pthread_equal(p -> id, id)) {
278: prev = p;
279: p = p -> next;
280: }
281: if (prev == 0) {
282: GC_threads[hv] = p -> next;
283: } else {
284: prev -> next = p -> next;
285: }
286: }
287:
288: /* If a thread has been joined, but we have not yet */
289: /* been notified, then there may be more than one thread */
290: /* in the table with the same pthread id. */
291: /* This is OK, but we need a way to delete a specific one. */
292: void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
293: {
294: int hv = ((word)id) % THREAD_TABLE_SZ;
295: register GC_thread p = GC_threads[hv];
296: register GC_thread prev = 0;
297:
298: while (p != gc_id) {
299: prev = p;
300: p = p -> next;
301: }
302: if (prev == 0) {
303: GC_threads[hv] = p -> next;
304: } else {
305: prev -> next = p -> next;
306: }
307: }
308:
309: /* Return a GC_thread corresponding to a given thread_t. */
310: /* Returns 0 if it's not there. */
311: /* Caller holds allocation lock or otherwise inhibits */
312: /* updates. */
313: /* If there is more than one thread with the given id we */
314: /* return the most recent one. */
315: GC_thread GC_lookup_thread(pthread_t id)
316: {
317: int hv = ((word)id) % THREAD_TABLE_SZ;
318: register GC_thread p = GC_threads[hv];
319:
320: while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
321: return(p);
322: }
323:
324: /* Caller holds allocation lock. */
325: void GC_stop_world()
326: {
327: pthread_t my_thread = pthread_self();
328: register int i;
329: register GC_thread p;
330: register int n_live_threads = 0;
331: register int result;
332:
333: for (i = 0; i < THREAD_TABLE_SZ; i++) {
334: for (p = GC_threads[i]; p != 0; p = p -> next) {
335: if (p -> id != my_thread) {
336: if (p -> flags & FINISHED) continue;
337: n_live_threads++;
338: #if DEBUG_THREADS
339: GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
340: #endif
341: result = pthread_kill(p -> id, SIG_SUSPEND);
342: switch(result) {
343: case ESRCH:
344: /* Not really there anymore. Possible? */
345: n_live_threads--;
346: break;
347: case 0:
348: break;
349: default:
350: ABORT("pthread_kill failed");
351: }
352: }
353: }
354: }
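    /* Wait for an acknowledgement from each thread we signalled: every     */
    /* suspended thread posts GC_suspend_ack_sem from GC_suspend_handler    */
    /* once it has recorded its stack pointer.                               */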
355: for (i = 0; i < n_live_threads; i++) {
356: sem_wait(&GC_suspend_ack_sem);
357: }
358: #if DEBUG_THREADS
359: GC_printf1("World stopped 0x%x\n", pthread_self());
360: #endif
361: }
362:
363: /* Caller holds allocation lock. */
364: void GC_start_world()
365: {
366: pthread_t my_thread = pthread_self();
367: register int i;
368: register GC_thread p;
369: register int n_live_threads = 0;
370: register int result;
371:
372: # if DEBUG_THREADS
373: GC_printf0("World starting\n");
374: # endif
375:
376: for (i = 0; i < THREAD_TABLE_SZ; i++) {
377: for (p = GC_threads[i]; p != 0; p = p -> next) {
378: if (p -> id != my_thread) {
379: if (p -> flags & FINISHED) continue;
380: n_live_threads++;
381: #if DEBUG_THREADS
382: GC_printf1("Sending restart signal to 0x%x\n", p -> id);
383: #endif
384: result = pthread_kill(p -> id, SIG_RESTART);
385: switch(result) {
386: case ESRCH:
387: /* Not really there anymore. Possible? */
388: n_live_threads--;
389: break;
390: case 0:
391: break;
392: default:
393: ABORT("pthread_kill failed");
394: }
395: }
396: }
397: }
398: #if DEBUG_THREADS
399: GC_printf0("World started\n");
400: #endif
401: }
402:
1.1.1.3 ! maekawa 403: # ifdef IA64
! 404: # define IF_IA64(x) x
! 405: # else
! 406: # define IF_IA64(x)
! 407: # endif
! 408: /* We hold allocation lock. Should do exactly the right thing if the */
! 409: /* world is stopped. Should not fail if it isn't. */
1.1 maekawa 410: void GC_push_all_stacks()
411: {
1.1.1.3 ! maekawa 412: int i;
! 413: GC_thread p;
! 414: ptr_t sp = GC_approx_sp();
! 415: ptr_t lo, hi;
! 416: /* On IA64, we also need to scan the register backing store. */
! 417: IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
1.1 maekawa 418: pthread_t me = pthread_self();
419:
420: if (!GC_thr_initialized) GC_thr_init();
421: #if DEBUG_THREADS
422: GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
423: #endif
424: for (i = 0; i < THREAD_TABLE_SZ; i++) {
425: for (p = GC_threads[i]; p != 0; p = p -> next) {
426: if (p -> flags & FINISHED) continue;
427: if (pthread_equal(p -> id, me)) {
428: lo = GC_approx_sp();
1.1.1.3 ! maekawa 429: IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
1.1 maekawa 430: } else {
431: lo = p -> stack_ptr;
1.1.1.3 ! maekawa 432: IF_IA64(bs_hi = p -> backing_store_ptr;)
1.1 maekawa 433: }
434: if ((p -> flags & MAIN_THREAD) == 0) {
1.1.1.3 ! maekawa 435: hi = p -> stack_end;
! 436: IF_IA64(bs_lo = p -> backing_store_end);
1.1 maekawa 437: } else {
438: /* The original stack. */
439: hi = GC_stackbottom;
1.1.1.3 ! maekawa 440: IF_IA64(bs_lo = BACKING_STORE_BASE;)
1.1 maekawa 441: }
442: #if DEBUG_THREADS
443: GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
444: (unsigned long) p -> id,
445: (unsigned long) lo, (unsigned long) hi);
446: #endif
1.1.1.3 ! maekawa 447: if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
1.1 maekawa 448: GC_push_all_stack(lo, hi);
1.1.1.3 ! maekawa 449: # ifdef IA64
! 450: if (pthread_equal(p -> id, me)) {
! 451: GC_push_all_eager(bs_lo, bs_hi);
! 452: } else {
! 453: GC_push_all_stack(bs_lo, bs_hi);
! 454: }
! 455: # endif
1.1 maekawa 456: }
457: }
458: }
459:
460:
461: /* We hold the allocation lock. */
462: void GC_thr_init()
463: {
1.1.1.3 ! maekawa 464: int dummy;
1.1 maekawa 465: GC_thread t;
466: struct sigaction act;
467:
468: if (GC_thr_initialized) return;
469: GC_thr_initialized = TRUE;
470:
471: if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
472: ABORT("sem_init failed");
473:
474: act.sa_flags = SA_RESTART;
475: if (sigfillset(&act.sa_mask) != 0) {
476: ABORT("sigfillset() failed");
477: }
1.1.1.3 ! maekawa 478: # ifdef NO_SIGNALS
! 479: if (sigdelset(&act.sa_mask, SIGINT) != 0
!  480: || sigdelset(&act.sa_mask, SIGQUIT) != 0
!  481: || sigdelset(&act.sa_mask, SIGTERM) != 0) {
! 482: ABORT("sigdelset() failed");
! 483: }
! 484: # endif
! 485:
1.1 maekawa 486: /* SIG_RESTART is unmasked by the handler when necessary. */
487: act.sa_handler = GC_suspend_handler;
488: if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
489: ABORT("Cannot set SIG_SUSPEND handler");
490: }
491:
492: act.sa_handler = GC_restart_handler;
493: if (sigaction(SIG_RESTART, &act, NULL) != 0) {
 494: ABORT("Cannot set SIG_RESTART handler");
495: }
496:
497: /* Add the initial thread, so we can stop it. */
498: t = GC_new_thread(pthread_self());
1.1.1.3 ! maekawa 499: t -> stack_ptr = (ptr_t)(&dummy);
1.1 maekawa 500: t -> flags = DETACHED | MAIN_THREAD;
501: }
502:
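/* Wrapper for the client's pthread_sigmask: never let the client block    */
/* SIG_SUSPEND, since the collector relies on delivering it to stop the    */
/* world.                                                                    */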
1.1.1.3 ! maekawa 503: int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
1.1 maekawa 504: {
505: sigset_t fudged_set;
506:
507: if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
508: fudged_set = *set;
509: sigdelset(&fudged_set, SIG_SUSPEND);
510: set = &fudged_set;
511: }
1.1.1.3 ! maekawa 512: return(REAL_FUNC(pthread_sigmask)(how, set, oset));
1.1 maekawa 513: }
514:
515: struct start_info {
516: void *(*start_routine)(void *);
517: void *arg;
518: word flags;
519: sem_t registered; /* 1 ==> in our thread table, but */
520: /* parent hasn't yet noticed. */
521: };
522:
523:
524: void GC_thread_exit_proc(void *arg)
525: {
526: GC_thread me;
527: struct start_info * si = arg;
528:
529: LOCK();
530: me = GC_lookup_thread(pthread_self());
531: if (me -> flags & DETACHED) {
532: GC_delete_thread(pthread_self());
533: } else {
534: me -> flags |= FINISHED;
535: }
1.1.1.3 ! maekawa 536: if (GC_incremental && GC_collection_in_progress()) {
! 537: int old_gc_no = GC_gc_no;
! 538:
! 539: /* Make sure that no part of our stack is still on the mark stack, */
! 540: /* since it's about to be unmapped. */
! 541: while (GC_incremental && GC_collection_in_progress()
! 542: && old_gc_no == GC_gc_no) {
! 543: ENTER_GC();
! 544: GC_collect_a_little_inner(1);
! 545: EXIT_GC();
! 546: UNLOCK();
! 547: sched_yield();
! 548: LOCK();
! 549: }
! 550: }
1.1 maekawa 551: UNLOCK();
552: }
553:
1.1.1.3 ! maekawa 554: int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
1.1 maekawa 555: {
556: int result;
557: GC_thread thread_gc_id;
558:
559: LOCK();
560: thread_gc_id = GC_lookup_thread(thread);
561: /* This is guaranteed to be the intended one, since the thread id */
 562: /* can't have been recycled by pthreads. */
563: UNLOCK();
1.1.1.3 ! maekawa 564: result = REAL_FUNC(pthread_join)(thread, retval);
1.1 maekawa 565: LOCK();
566: /* Here the pthread thread id may have been recycled. */
567: GC_delete_gc_thread(thread, thread_gc_id);
568: UNLOCK();
569: return result;
570: }
571:
572: void * GC_start_routine(void * arg)
573: {
1.1.1.3 ! maekawa 574: int dummy;
1.1 maekawa 575: struct start_info * si = arg;
576: void * result;
577: GC_thread me;
578: pthread_t my_pthread;
579: void *(*start)(void *);
580: void *start_arg;
581:
582: my_pthread = pthread_self();
1.1.1.3 ! maekawa 583: # ifdef DEBUG_THREADS
! 584: GC_printf1("Starting thread 0x%lx\n", my_pthread);
! 585: GC_printf1("pid = %ld\n", (long) getpid());
! 586: GC_printf1("sp = 0x%lx\n", (long) &arg);
! 587: # endif
1.1 maekawa 588: LOCK();
589: me = GC_new_thread(my_pthread);
590: me -> flags = si -> flags;
591: me -> stack_ptr = 0;
1.1.1.3 ! maekawa 592: /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99) */
! 593: /* doesn't work because the stack base in /proc/self/stat is the */
! 594: /* one for the main thread. There is a strong argument that that's */
! 595: /* a kernel bug, but a pervasive one. */
! 596: # ifdef STACK_GROWS_DOWN
! 597: me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
! 598: & ~(GC_page_size - 1));
! 599: me -> stack_ptr = me -> stack_end - 0x10;
! 600: /* Needs to be plausible, since an asynchronous stack mark */
! 601: /* should not crash. */
! 602: # else
!  603: me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
! 604: me -> stack_ptr = me -> stack_end + 0x10;
! 605: # endif
! 606: /* This is dubious, since we may be more than a page into the stack, */
! 607: /* and hence skip some of it, though it's not clear that matters. */
! 608: # ifdef IA64
! 609: me -> backing_store_end = (ptr_t)
! 610: (GC_save_regs_in_stack() & ~(GC_page_size - 1));
! 611: /* This is also < 100% convincing. We should also read this */
! 612: /* from /proc, but the hook to do so isn't there yet. */
! 613: # endif /* IA64 */
1.1 maekawa 614: UNLOCK();
615: start = si -> start_routine;
616: # ifdef DEBUG_THREADS
617: GC_printf1("start_routine = 0x%lx\n", start);
618: # endif
1.1.1.3 ! maekawa 619: start_arg = si -> arg;
! 620: sem_post(&(si -> registered));
! 621: pthread_cleanup_push(GC_thread_exit_proc, si);
1.1 maekawa 622: result = (*start)(start_arg);
623: #if DEBUG_THREADS
624: GC_printf1("Finishing thread 0x%x\n", pthread_self());
625: #endif
626: me -> status = result;
627: me -> flags |= FINISHED;
628: pthread_cleanup_pop(1);
629: /* Cleanup acquires lock, ensuring that we can't exit */
630: /* while a collection that thinks we're alive is trying to stop */
631: /* us. */
632: return(result);
633: }
634:
635: int
1.1.1.3 ! maekawa 636: WRAP_FUNC(pthread_create)(pthread_t *new_thread,
1.1 maekawa 637: const pthread_attr_t *attr,
638: void *(*start_routine)(void *), void *arg)
639: {
640: int result;
641: GC_thread t;
642: pthread_t my_new_thread;
643: void * stack;
644: size_t stacksize;
645: pthread_attr_t new_attr;
646: int detachstate;
647: word my_flags = 0;
648: struct start_info * si = GC_malloc(sizeof(struct start_info));
649: /* This is otherwise saved only in an area mmapped by the thread */
650: /* library, which isn't visible to the collector. */
651:
652: if (0 == si) return(ENOMEM);
653: sem_init(&(si -> registered), 0, 0);
654: si -> start_routine = start_routine;
655: si -> arg = arg;
656: LOCK();
657: if (!GC_thr_initialized) GC_thr_init();
658: if (NULL == attr) {
659: stack = 0;
660: (void) pthread_attr_init(&new_attr);
661: } else {
662: new_attr = *attr;
663: }
664: pthread_attr_getdetachstate(&new_attr, &detachstate);
665: if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
666: si -> flags = my_flags;
667: UNLOCK();
1.1.1.3 ! maekawa 668: # ifdef DEBUG_THREADS
! 669: GC_printf1("About to start new thread from thread 0x%X\n",
! 670: pthread_self());
! 671: # endif
! 672: result = REAL_FUNC(pthread_create)(new_thread, &new_attr, GC_start_routine, si);
! 673: # ifdef DEBUG_THREADS
! 674: GC_printf1("Started thread 0x%X\n", *new_thread);
! 675: # endif
1.1 maekawa 676: /* Wait until child has been added to the thread table. */
677: /* This also ensures that we hold onto si until the child is done */
678: /* with it. Thus it doesn't matter whether it is otherwise */
679: /* visible to the collector. */
680: if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
681: sem_destroy(&(si -> registered));
 682: /* pthread_attr_destroy(&new_attr); */
684: return(result);
685: }
686:
1.1.1.3 ! maekawa 687: #if defined(USE_SPIN_LOCK)
! 688:
! 689: VOLATILE GC_bool GC_collecting = 0;
1.1 maekawa 690: /* A hint that we're in the collector and */
691: /* holding the allocation lock for an */
692: /* extended period. */
693:
694: /* Reasonably fast spin locks. Basically the same implementation */
 695: /* as STL alloc.h. This isn't really the right way to do this, */
696: /* but until the POSIX scheduling mess gets straightened out ... */
697:
698: volatile unsigned int GC_allocate_lock = 0;
699:
700:
701: void GC_lock()
702: {
703: # define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
704: # define high_spin_max 1000 /* spin cycles for multiprocessor */
705: static unsigned spin_max = low_spin_max;
706: unsigned my_spin_max;
707: static unsigned last_spins = 0;
708: unsigned my_last_spins;
709: volatile unsigned junk;
710: # define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
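    /* PAUSE burns a few cycles without being optimized away, since junk is */
    /* declared volatile.                                                     */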
711: int i;
712:
713: if (!GC_test_and_set(&GC_allocate_lock)) {
714: return;
715: }
716: junk = 0;
717: my_spin_max = spin_max;
718: my_last_spins = last_spins;
719: for (i = 0; i < my_spin_max; i++) {
720: if (GC_collecting) goto yield;
721: if (i < my_last_spins/2 || GC_allocate_lock) {
722: PAUSE;
723: continue;
724: }
725: if (!GC_test_and_set(&GC_allocate_lock)) {
726: /*
727: * got it!
728: * Spinning worked. Thus we're probably not being scheduled
729: * against the other process with which we were contending.
730: * Thus it makes sense to spin longer the next time.
731: */
732: last_spins = i;
733: spin_max = high_spin_max;
734: return;
735: }
736: }
737: /* We are probably being scheduled against the other process. Sleep. */
738: spin_max = low_spin_max;
739: yield:
740: for (i = 0;; ++i) {
741: if (!GC_test_and_set(&GC_allocate_lock)) {
742: return;
743: }
744: # define SLEEP_THRESHOLD 12
745: /* nanosleep(<= 2ms) just spins under Linux. We */
746: /* want to be careful to avoid that behavior. */
747: if (i < SLEEP_THRESHOLD) {
748: sched_yield();
749: } else {
750: struct timespec ts;
751:
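            /* Exponential backoff: sleep roughly 2^i nanoseconds, doubling  */
            /* each iteration, capped at i == 26 (about 67 msec).            */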
752: if (i > 26) i = 26;
753: /* Don't wait for more than about 60msecs, even */
754: /* under extreme contention. */
755: ts.tv_sec = 0;
756: ts.tv_nsec = 1 << i;
757: nanosleep(&ts, 0);
758: }
759: }
760: }
1.1.1.3 ! maekawa 761:
!  762: #endif /* USE_SPIN_LOCK */
1.1 maekawa 763:
764: # endif /* LINUX_THREADS */
765: