version 1.1, 1999/12/03 07:39:09 |
version 1.2, 2001/04/20 07:39:18 |
|
|
/* |
/* |
* Copyright (c) 1994 by Xerox Corporation. All rights reserved. |
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. |
* Copyright (c) 1996 by Silicon Graphics. All rights reserved. |
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. |
|
* Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved. |
* |
* |
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED |
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED |
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK. |
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK. |
|
|
* not guaranteed by the Pthread standard. It may or may not be portable |
* not guaranteed by the Pthread standard. It may or may not be portable |
* to other implementations. |
* to other implementations. |
* |
* |
|
* This now also includes an initial attempt at thread support for |
|
* HP/UX 11. |
|
* |
* Note that there is a lot of code duplication between linux_threads.c |
* Note that there is a lot of code duplication between linux_threads.c |
* and irix_threads.c; any changes made here may need to be reflected |
* and hpux_irix_threads.c; any changes made here may need to be reflected |
* there too. |
* there too. |
*/ |
*/ |
|
|
# if defined(IRIX_THREADS) |
# if defined(GC_IRIX_THREADS) || defined(IRIX_THREADS) |
|
|
# include "gc_priv.h" |
# include "private/gc_priv.h" |
# include <pthread.h> |
# include <pthread.h> |
# include <semaphore.h> |
# include <semaphore.h> |
# include <time.h> |
# include <time.h> |
|
|
#undef pthread_create |
#undef pthread_create |
#undef pthread_sigmask |
#undef pthread_sigmask |
#undef pthread_join |
#undef pthread_join |
|
#undef pthread_detach |
|
|
void GC_thr_init(); |
void GC_thr_init(); |
|
|
Line 169 ptr_t GC_stack_alloc(size_t * stack_size) |
|
Line 174 ptr_t GC_stack_alloc(size_t * stack_size) |
|
result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz); |
result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz); |
result = (ptr_t)(((word)result + GC_page_sz) & ~(GC_page_sz - 1)); |
result = (ptr_t)(((word)result + GC_page_sz) & ~(GC_page_sz - 1)); |
/* Protect hottest page to detect overflow. */ |
/* Protect hottest page to detect overflow. */ |
/* mprotect(result, GC_page_sz, PROT_NONE); */ |
# ifdef STACK_GROWS_UP |
result += GC_page_sz; |
/* mprotect(result + search_sz, GC_page_sz, PROT_NONE); */ |
|
# else |
|
/* mprotect(result, GC_page_sz, PROT_NONE); */ |
|
result += GC_page_sz; |
|
# endif |
} |
} |
*stack_size = search_sz; |
*stack_size = search_sz; |
return(result); |
return(result); |
Line 196 void GC_stack_free(ptr_t stack, size_t size) |
|
Line 205 void GC_stack_free(ptr_t stack, size_t size) |
|
# define THREAD_TABLE_SZ 128 /* Must be power of 2 */ |
# define THREAD_TABLE_SZ 128 /* Must be power of 2 */ |
volatile GC_thread GC_threads[THREAD_TABLE_SZ]; |
volatile GC_thread GC_threads[THREAD_TABLE_SZ]; |
|
|
|
/* Push the entire GC_threads hash table (the per-thread descriptor     */
/* chains) as a root region, so that the collector traces the thread   */
/* descriptors themselves.  NOTE(review): presumably installed as the  */
/* collector's thread-structure marking hook — the registration site   */
/* is not visible in this fragment; confirm against gc_priv.h.         */
void GC_push_thread_structures GC_PROTO((void))

{

    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));

}
|
|
/* Add a thread to GC_threads. We assume it wasn't already there. */ |
/* Add a thread to GC_threads. We assume it wasn't already there. */ |
/* Caller holds allocation lock. */ |
/* Caller holds allocation lock. */ |
GC_thread GC_new_thread(pthread_t id) |
GC_thread GC_new_thread(pthread_t id) |
Line 211 GC_thread GC_new_thread(pthread_t id) |
|
Line 225 GC_thread GC_new_thread(pthread_t id) |
|
/* Dont acquire allocation lock, since we may already hold it. */ |
/* Dont acquire allocation lock, since we may already hold it. */ |
} else { |
} else { |
result = (struct GC_Thread_Rep *) |
result = (struct GC_Thread_Rep *) |
GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL); |
GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL); |
} |
} |
if (result == 0) return(0); |
if (result == 0) return(0); |
result -> id = id; |
result -> id = id; |
Line 375 int GC_is_thread_stack(ptr_t addr) |
|
Line 389 int GC_is_thread_stack(ptr_t addr) |
|
} |
} |
# endif |
# endif |
|
|
/* We hold allocation lock. We assume the world is stopped. */ |
/* We hold allocation lock. Should do exactly the right thing if the */ |
|
/* world is stopped. Should not fail if it isn't. */ |
void GC_push_all_stacks() |
void GC_push_all_stacks() |
{ |
{ |
register int i; |
register int i; |
register GC_thread p; |
register GC_thread p; |
register ptr_t sp = GC_approx_sp(); |
register ptr_t sp = GC_approx_sp(); |
register ptr_t lo, hi; |
register ptr_t hot, cold; |
pthread_t me = pthread_self(); |
pthread_t me = pthread_self(); |
|
|
if (!GC_thr_initialized) GC_thr_init(); |
if (!GC_thr_initialized) GC_thr_init(); |
Line 390 void GC_push_all_stacks() |
|
Line 405 void GC_push_all_stacks() |
|
for (p = GC_threads[i]; p != 0; p = p -> next) { |
for (p = GC_threads[i]; p != 0; p = p -> next) { |
if (p -> flags & FINISHED) continue; |
if (p -> flags & FINISHED) continue; |
if (pthread_equal(p -> id, me)) { |
if (pthread_equal(p -> id, me)) { |
lo = GC_approx_sp(); |
hot = GC_approx_sp(); |
} else { |
} else { |
lo = p -> stack_ptr; |
hot = p -> stack_ptr; |
} |
} |
if (p -> stack_size != 0) { |
if (p -> stack_size != 0) { |
hi = p -> stack + p -> stack_size; |
# ifdef STACK_GROWS_UP |
|
cold = p -> stack; |
|
# else |
|
cold = p -> stack + p -> stack_size; |
|
# endif |
} else { |
} else { |
/* The original stack. */ |
/* The original stack. */ |
hi = GC_stackbottom; |
cold = GC_stackbottom; |
} |
} |
GC_push_all_stack(lo, hi); |
# ifdef STACK_GROWS_UP |
|
GC_push_all_stack(cold, hot); |
|
# else |
|
GC_push_all_stack(hot, cold); |
|
# endif |
} |
} |
} |
} |
} |
} |
Line 482 int GC_pthread_join(pthread_t thread, void **retval) |
|
Line 505 int GC_pthread_join(pthread_t thread, void **retval) |
|
/* Some versions of the Irix pthreads library can erroneously */ |
/* Some versions of the Irix pthreads library can erroneously */ |
/* return EINTR when the call succeeds. */ |
/* return EINTR when the call succeeds. */ |
if (EINTR == result) result = 0; |
if (EINTR == result) result = 0; |
|
if (result == 0) { |
|
LOCK(); |
|
/* Here the pthread thread id may have been recycled. */ |
|
GC_delete_gc_thread(thread, thread_gc_id); |
|
UNLOCK(); |
|
} |
|
return result; |
|
} |
|
|
|
int GC_pthread_detach(pthread_t thread) |
|
{ |
|
int result; |
|
GC_thread thread_gc_id; |
|
|
LOCK(); |
LOCK(); |
/* Here the pthread thread id may have been recycled. */ |
thread_gc_id = GC_lookup_thread(thread); |
GC_delete_gc_thread(thread, thread_gc_id); |
|
UNLOCK(); |
UNLOCK(); |
|
result = REAL_FUNC(pthread_detach)(thread); |
|
if (result == 0) { |
|
LOCK(); |
|
thread_gc_id -> flags |= DETACHED; |
|
/* Here the pthread thread id may have been recycled. */ |
|
if (thread_gc_id -> flags & FINISHED) { |
|
GC_delete_gc_thread(thread, thread_gc_id); |
|
} |
|
UNLOCK(); |
|
} |
return result; |
return result; |
} |
} |
|
|
Line 531 void * GC_start_routine(void * arg) |
|
Line 577 void * GC_start_routine(void * arg) |
|
return(result); |
return(result); |
} |
} |
|
|
|
# define copy_attr(pa_ptr, source) *(pa_ptr) = *(source) |
|
|
int |
int |
GC_pthread_create(pthread_t *new_thread, |
GC_pthread_create(pthread_t *new_thread, |
const pthread_attr_t *attr, |
const pthread_attr_t *attr, |
Line 548 GC_pthread_create(pthread_t *new_thread, |
|
Line 596 GC_pthread_create(pthread_t *new_thread, |
|
/* library, which isn't visible to the collector. */ |
/* library, which isn't visible to the collector. */ |
|
|
if (0 == si) return(ENOMEM); |
if (0 == si) return(ENOMEM); |
sem_init(&(si -> registered), 0, 0); |
if (0 != sem_init(&(si -> registered), 0, 0)) { |
|
ABORT("sem_init failed"); |
|
} |
si -> start_routine = start_routine; |
si -> start_routine = start_routine; |
si -> arg = arg; |
si -> arg = arg; |
LOCK(); |
LOCK(); |
Line 557 GC_pthread_create(pthread_t *new_thread, |
|
Line 607 GC_pthread_create(pthread_t *new_thread, |
|
stack = 0; |
stack = 0; |
(void) pthread_attr_init(&new_attr); |
(void) pthread_attr_init(&new_attr); |
} else { |
} else { |
new_attr = *attr; |
copy_attr(&new_attr, attr); |
pthread_attr_getstackaddr(&new_attr, &stack); |
pthread_attr_getstackaddr(&new_attr, &stack); |
} |
} |
pthread_attr_getstacksize(&new_attr, &stacksize); |
pthread_attr_getstacksize(&new_attr, &stacksize); |
Line 586 GC_pthread_create(pthread_t *new_thread, |
|
Line 636 GC_pthread_create(pthread_t *new_thread, |
|
/* This also ensures that we hold onto si until the child is done */ |
/* This also ensures that we hold onto si until the child is done */ |
/* with it. Thus it doesn't matter whether it is otherwise */ |
/* with it. Thus it doesn't matter whether it is otherwise */ |
/* visible to the collector. */ |
/* visible to the collector. */ |
if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed"); |
while (0 != sem_wait(&(si -> registered))) { |
|
if (errno != EINTR) { |
|
GC_printf1("Sem_wait: errno = %ld\n", (unsigned long) errno); |
|
ABORT("sem_wait failed"); |
|
} |
|
} |
sem_destroy(&(si -> registered)); |
sem_destroy(&(si -> registered)); |
/* pthread_attr_destroy(&new_attr); */ |
pthread_attr_destroy(&new_attr); /* Probably unnecessary under Irix */ |
return(result); |
return(result); |
} |
} |
|
|
GC_bool GC_collecting = 0; /* A hint that we're in the collector and */ |
VOLATILE GC_bool GC_collecting = 0; |
|
/* A hint that we're in the collector and */ |
/* holding the allocation lock for an */ |
/* holding the allocation lock for an */ |
/* extended period. */ |
/* extended period. */ |
|
|
/* Reasonably fast spin locks. Basically the same implementation */ |
/* Reasonably fast spin locks. Basically the same implementation */ |
/* as STL alloc.h. This isn't really the right way to do this. */ |
/* as STL alloc.h. */ |
/* but until the POSIX scheduling mess gets straightened out ... */ |
|
|
|
unsigned long GC_allocate_lock = 0; |
|
|
|
#define SLEEP_THRESHOLD 3 |
#define SLEEP_THRESHOLD 3 |
|
|
|
unsigned long GC_allocate_lock = 0; |
|
# define GC_TRY_LOCK() !GC_test_and_set(&GC_allocate_lock,1) |
|
# define GC_LOCK_TAKEN GC_allocate_lock |
|
|
void GC_lock() |
void GC_lock() |
{ |
{ |
# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */ |
# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */ |
|
|
# define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk |
# define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk |
int i; |
int i; |
|
|
if (!GC_test_and_set(&GC_allocate_lock, 1)) { |
if (GC_TRY_LOCK()) { |
return; |
return; |
} |
} |
junk = 0; |
junk = 0; |
|
|
my_last_spins = last_spins; |
my_last_spins = last_spins; |
for (i = 0; i < my_spin_max; i++) { |
for (i = 0; i < my_spin_max; i++) { |
if (GC_collecting) goto yield; |
if (GC_collecting) goto yield; |
if (i < my_last_spins/2 || GC_allocate_lock) { |
if (i < my_last_spins/2 || GC_LOCK_TAKEN) { |
PAUSE; |
PAUSE; |
continue; |
continue; |
} |
} |
if (!GC_test_and_set(&GC_allocate_lock, 1)) { |
if (GC_TRY_LOCK()) { |
/* |
/* |
* got it! |
* got it! |
* Spinning worked. Thus we're probably not being scheduled |
* Spinning worked. Thus we're probably not being scheduled |
|
|
spin_max = low_spin_max; |
spin_max = low_spin_max; |
yield: |
yield: |
for (i = 0;; ++i) { |
for (i = 0;; ++i) { |
if (!GC_test_and_set(&GC_allocate_lock, 1)) { |
if (GC_TRY_LOCK()) { |
return; |
return; |
} |
} |
if (i < SLEEP_THRESHOLD) { |
if (i < SLEEP_THRESHOLD) { |
|
|
} |
} |
} |
} |
} |
} |
|
|
|
|
|
|
# else |
# else |
|
|