
Diff for /OpenXM_contrib2/asir2000/gc/Attic/linux_threads.c between version 1.5 and 1.6

version 1.5 (2002/07/24 07:46:18)  vs.  version 1.6 (2002/07/24 08:00:09)
Line 26 / Line 26
  * and thread support for some of the other Posix platforms; any changes   * and thread support for some of the other Posix platforms; any changes
  * made here may need to be reflected there too.   * made here may need to be reflected there too.
  */   */
    /* DG/UX ix86 support <takis@xfree86.org> */
 /*  /*
  * Linux_threads.c now also includes some code to support HPUX and   * Linux_threads.c now also includes some code to support HPUX and
  * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is not yet   * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is not yet
Line 50 / Line 51
   
 /* ANSI C requires that a compilation unit contains something */  /* ANSI C requires that a compilation unit contains something */
   
 # if defined(GC_LINUX_THREADS) || defined(LINUX_THREADS) \  # include "gc.h"
      || defined(GC_HPUX_THREADS) || defined(HPUX_THREADS) \  
      || defined(GC_OSF1_THREADS) || defined(OSF1_THREADS) \  
   
   # if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
        && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
   
 # include "private/gc_priv.h"  # include "private/gc_priv.h"
   
   # if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
        && !defined(USE_HPUX_TLS)
   #   define USE_HPUX_TLS
   # endif
   
   # if defined(GC_DGUX386_THREADS) && !defined(USE_PTHREAD_SPECIFIC)
   #   define USE_PTHREAD_SPECIFIC
   # endif
   
   # if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
   #   define _POSIX4A_DRAFT10_SOURCE 1
   # endif
   
   # if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
   #   define _USING_POSIX4A_DRAFT10 1
   # endif
   
 # ifdef THREAD_LOCAL_ALLOC  # ifdef THREAD_LOCAL_ALLOC
 #   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_HPUX_TLS)  #   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_HPUX_TLS)
 #     include "private/specific.h"  #     include "private/specific.h"
Line 86 / Line 106
 # include <sys/stat.h>  # include <sys/stat.h>
 # include <fcntl.h>  # include <fcntl.h>
   
   #if defined(GC_DGUX386_THREADS)
   # include <sys/dg_sys_info.h>
   # include <sys/_int_psem.h>
    /* sem_t is an unsigned int in DG/UX */
     typedef unsigned int  sem_t;
   #endif /* GC_DGUX386_THREADS */
   
 #ifndef __GNUC__  #ifndef __GNUC__
 #   define __inline__  #   define __inline__
 #endif  #endif
Line 95 / Line 122
 #   define REAL_FUNC(f) __real_##f  #   define REAL_FUNC(f) __real_##f
 #else  #else
 #   define WRAP_FUNC(f) GC_##f  #   define WRAP_FUNC(f) GC_##f
 #   define REAL_FUNC(f) f  #   if !defined(GC_DGUX386_THREADS)
   #     define REAL_FUNC(f) f
   #   else /* GC_DGUX386_THREADS */
   #     define REAL_FUNC(f) __d10_##f
   #   endif /* GC_DGUX386_THREADS */
 #   undef pthread_create  #   undef pthread_create
 #   undef pthread_sigmask  #   undef pthread_sigmask
 #   undef pthread_join  #   undef pthread_join
Line 161 / Line 192  typedef struct GC_Thread_Rep {
 #   ifdef THREAD_LOCAL_ALLOC  #   ifdef THREAD_LOCAL_ALLOC
 #       if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)  #       if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
 #           define GRANULARITY 16  #           define GRANULARITY 16
 #           define NFREELISTS 48  #           define NFREELISTS 49
 #       else  #       else
 #           define GRANULARITY 8  #           define GRANULARITY 8
 #           define NFREELISTS 64  #           define NFREELISTS 65
 #       endif  #       endif
         /* The ith free list corresponds to size (i+1)*GRANULARITY */          /* The ith free list corresponds to size i*GRANULARITY */
 #       define INDEX_FROM_BYTES(n) (ADD_SLOP(n) - 1)/GRANULARITY  #       define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
 #       define BYTES_FROM_INDEX(i) (((i) + 1) * GRANULARITY - EXTRA_BYTES)  #       define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
 #       define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= NFREELISTS*GRANULARITY)  #       define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
                                       (NFREELISTS-1)*GRANULARITY)
         ptr_t ptrfree_freelists[NFREELISTS];          ptr_t ptrfree_freelists[NFREELISTS];
         ptr_t normal_freelists[NFREELISTS];          ptr_t normal_freelists[NFREELISTS];
 #       ifdef GC_GCJ_SUPPORT  #       ifdef GC_GCJ_SUPPORT
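The revised macros change the mapping so that free list i holds objects of size i*GRANULARITY rather than (i+1)*GRANULARITY, which frees index 0 for the special size-0 list introduced below and is why NFREELISTS grows from 48/64 to 49/65. A standalone sketch of the new mapping, assuming GRANULARITY is 8 and that EXTRA_BYTES is 0 and ADD_SLOP(n) is just n (both are configuration-dependent in the real collector):

    #include <stdio.h>
    #include <stddef.h>

    #define GRANULARITY 8
    #define NFREELISTS 65
    #define EXTRA_BYTES 0              /* assumption: no debug header      */
    #define ADD_SLOP(n) (n)            /* assumption: no slop in this mode */

    #define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
    #define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
    #define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= (NFREELISTS-1)*GRANULARITY)

    int main(void)
    {
        size_t requests[] = { 0, 1, 8, 9, 16, 512 };
        for (int i = 0; i < 6; ++i) {
            size_t n = requests[i];
            int idx = (int)INDEX_FROM_BYTES(n);
            /* index 0 is the dedicated size-0 list; list i holds objects */
            /* of i*GRANULARITY bytes.                                    */
            printf("request %3d -> index %2d -> object size %3d (small: %d)\n",
                   (int)n, idx, (int)BYTES_FROM_INDEX(idx), SMALL_ENOUGH(n));
        }
        return 0;
    }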
Line 194 / Line 226  typedef struct GC_Thread_Rep {
   
 GC_thread GC_lookup_thread(pthread_t id);  GC_thread GC_lookup_thread(pthread_t id);
   
 static GC_bool fully_initialized = FALSE;  static GC_bool parallel_initialized = FALSE;
   
 # if defined(__GNUC__)  void GC_init_parallel();
     void GC_full_init() __attribute__ ((constructor));  
 # else  
     void GC_full_init();  
 # endif  
   
 # if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)  # if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
   
Line 223 / Line 251  static void return_freelists(ptr_t *fl, ptr_t *gfl)
     ptr_t q, *qptr;      ptr_t q, *qptr;
     size_t nwords;      size_t nwords;
   
     for (i = 0; i < NFREELISTS; ++i) {      for (i = 1; i < NFREELISTS; ++i) {
         nwords = (i + 1) * (GRANULARITY/sizeof(word));          nwords = i * (GRANULARITY/sizeof(word));
         qptr = fl + i;          qptr = fl + i;
         q = *qptr;          q = *qptr;
         if ((word)q < HBLKSIZE) continue;          if ((word)q >= HBLKSIZE) {
         if (gfl[nwords] == 0) {            if (gfl[nwords] == 0) {
             gfl[nwords] = q;              gfl[nwords] = q;
         } else {            } else {
             /* Concatenate: */              /* Concatenate: */
             for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);              for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
             GC_ASSERT(0 == q);              GC_ASSERT(0 == q);
             *qptr = gfl[nwords];              *qptr = gfl[nwords];
             gfl[nwords] = fl[i];              gfl[nwords] = fl[i];
             }
         }          }
         /* Clear fl[i], since the thread structure may hang around.     */          /* Clear fl[i], since the thread structure may hang around.     */
         /* Do it in a way that is likely to trap if we access it.       */          /* Do it in a way that is likely to trap if we access it.       */
Line 243 / Line 272  static void return_freelists(ptr_t *fl, ptr_t *gfl)
     }      }
 }  }
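The restructured loop now starts at index 1, since index 0 holds the shared size-zero object (below) that must never be returned to a global list, and the (word)q >= HBLKSIZE test distinguishes real chain heads from the small counter values the allocator stores in empty slots. The concatenation branch itself is the usual walk-to-the-end splice; a minimal model of it, with the free-list link stored in the object's first word as the GC does:

    #include <assert.h>
    #include <stddef.h>

    /* The free-list link lives in the object's first word. */
    typedef struct node { struct node *next; } node;

    /* Splice the thread-local chain fl in front of the global chain *gfl,
       as the concatenation branch of return_freelists() does.          */
    static void splice(node *fl, node **gfl)
    {
        node **qptr = &fl;
        while (*qptr != NULL) qptr = &(*qptr)->next;  /* find the 0 link  */
        *qptr = *gfl;                                 /* append global    */
        *gfl = fl;                                    /* new global head  */
    }

    int main(void)
    {
        node a, b, g;
        a.next = &b;  b.next = NULL;     /* thread-local list: a -> b     */
        g.next = NULL;                   /* global list: g                */
        node *global = &g;
        splice(&a, &global);
        assert(global == &a && b.next == &g && g.next == NULL);
        return 0;
    }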
   
   /* We statically allocate a single "size 0" object. It is linked to     */
   /* itself, and is thus repeatedly reused for all size 0 allocation      */
   /* requests.  (Size 0 gcj allocation requests are incorrect, and        */
   /* we arrange for those to fault asap.)                                 */
   static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
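The self-link makes the list effectively inexhaustible: popping the head and following its link yields the head again. A tiny assert-checked demonstration of the trick, outside the GC:

    #include <assert.h>

    /* A one-element cyclic free list: the object's link points at itself. */
    static void *size_zero_object = &size_zero_object;

    int main(void)
    {
        /* A thread's size-0 list head, as set in GC_init_thread_local. */
        void *freelist = &size_zero_object;
        for (int i = 0; i < 3; ++i) {
            void *obj = freelist;                      /* pop the head        */
            freelist = *(void **)obj;                  /* follow its link     */
            assert(obj == (void *)&size_zero_object);  /* same object forever */
        }
        return 0;
    }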
   
 /* Each thread structure must be initialized.   */  /* Each thread structure must be initialized.   */
 /* This call must be made from the new thread.  */  /* This call must be made from the new thread.  */
 /* Caller holds allocation lock.                */  /* Caller holds allocation lock.                */
Line 259 / Line 294  void GC_init_thread_local(GC_thread p)
     if (0 != GC_setspecific(GC_thread_key, p)) {      if (0 != GC_setspecific(GC_thread_key, p)) {
         ABORT("Failed to set thread specific allocation pointers");          ABORT("Failed to set thread specific allocation pointers");
     }      }
     for (i = 0; i < NFREELISTS; ++i) {      for (i = 1; i < NFREELISTS; ++i) {
         p -> ptrfree_freelists[i] = (ptr_t)1;          p -> ptrfree_freelists[i] = (ptr_t)1;
         p -> normal_freelists[i] = (ptr_t)1;          p -> normal_freelists[i] = (ptr_t)1;
 #       ifdef GC_GCJ_SUPPORT  #       ifdef GC_GCJ_SUPPORT
           p -> gcj_freelists[i] = (ptr_t)1;            p -> gcj_freelists[i] = (ptr_t)1;
 #       endif  #       endif
     }      }
       /* Set up the size 0 free lists.    */
       p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
       p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
   #   ifdef GC_GCJ_SUPPORT
           p -> gcj_freelists[0] = (ptr_t)(-1);
   #   endif
 }  }
   
 #ifdef GC_GCJ_SUPPORT  #ifdef GC_GCJ_SUPPORT
Line 275 / Line 316  void GC_init_thread_local(GC_thread p)
 /* We hold the allocator lock.  */  /* We hold the allocator lock.  */
 void GC_destroy_thread_local(GC_thread p)  void GC_destroy_thread_local(GC_thread p)
 {  {
     /* We currently only do this from the thread itself.        */      /* We currently only do this from the thread itself or from */
         GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);      /* the fork handler for a child process.                    */
   #   ifndef HANDLE_FORK
         GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
   #   endif
     return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);      return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
     return_freelists(p -> normal_freelists, GC_objfreelist);      return_freelists(p -> normal_freelists, GC_objfreelist);
 #   ifdef GC_GCJ_SUPPORT  #   ifdef GC_GCJ_SUPPORT
Line 297 / Line 341  GC_PTR GC_local_malloc(size_t bytes)
         GC_key_t k = GC_thread_key;          GC_key_t k = GC_thread_key;
         void * tsd;          void * tsd;
   
 #       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC) \  #       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
            || !defined(__GNUC__)  
             if (EXPECT(0 == k, 0)) {              if (EXPECT(0 == k, 0)) {
                 /* This can happen if we get called when the world is   */                  /* This can happen if we get called when the world is   */
                 /* being initialized.  Whether we can actually complete */                  /* being initialized.  Whether we can actually complete */
                 /* the initialization then is unclear.                  */                  /* the initialization then is unclear.                  */
                 GC_full_init();                  GC_init_parallel();
                 k = GC_thread_key;                  k = GC_thread_key;
             }              }
 #       endif  #       endif
Line 326 / Line 369  GC_PTR GC_local_malloc(size_t bytes)
             *my_fl = my_entry + index + 1;              *my_fl = my_entry + index + 1;
             return GC_malloc(bytes);              return GC_malloc(bytes);
         } else {          } else {
             my_entry = GC_generic_malloc_many(BYTES_FROM_INDEX(index),              GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
                                               NORMAL);              if (*my_fl == 0) return GC_oom_fn(bytes);
             *my_fl = my_entry;  
             if (my_entry == 0) return GC_oom_fn(bytes);  
             return GC_local_malloc(bytes);              return GC_local_malloc(bytes);
         }          }
     }      }
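The change above reflects a new calling convention for GC_generic_malloc_many: instead of returning the chain, it now stores it through a caller-supplied list pointer while the collector is excluded, so the list is never published half-built. A hedged sketch of that out-parameter refill shape (malloc_many and chunk are illustrative names, not the GC's API):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct chunk { struct chunk *next; } chunk;

    /* Refill via out-parameter: the callee stores the freshly built chain
       through result instead of returning it, so the caller's list pointer
       is updated at one well-defined point (under the lock, in the GC).   */
    static void malloc_many(size_t bytes, int count, chunk **result)
    {
        chunk *head = NULL;
        if (bytes < sizeof(chunk)) bytes = sizeof(chunk);
        for (int i = 0; i < count; ++i) {
            chunk *c = malloc(bytes);
            if (c == NULL) break;          /* a partial chain is still usable */
            c->next = head;
            head = c;
        }
        *result = head;                    /* 0 here means out of memory      */
    }

    int main(void)
    {
        chunk *fl = NULL;
        malloc_many(64, 8, &fl);
        if (fl == NULL) return 1;          /* the GC_oom_fn() analogue        */
        int n = 0;
        for (chunk *c = fl; c != NULL; c = c->next) ++n;
        printf("refilled %d objects\n", n);
        while (fl != NULL) { chunk *next = fl->next; free(fl); fl = next; }
        return 0;
    }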
Line 352 / Line 393  GC_PTR GC_local_malloc_atomic(size_t bytes)
             *my_fl = my_entry + index + 1;              *my_fl = my_entry + index + 1;
             return GC_malloc_atomic(bytes);              return GC_malloc_atomic(bytes);
         } else {          } else {
             my_entry = GC_generic_malloc_many(BYTES_FROM_INDEX(index),              GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
                                               PTRFREE);              /* *my_fl is updated while the collector is excluded;       */
             *my_fl = my_entry;              /* the free list is always visible to the collector as      */
             if (my_entry == 0) return GC_oom_fn(bytes);              /* such.                                                    */
               if (*my_fl == 0) return GC_oom_fn(bytes);
             return GC_local_malloc_atomic(bytes);              return GC_local_malloc_atomic(bytes);
         }          }
     }      }
Line 390 / Line 432  GC_PTR GC_local_gcj_malloc(size_t bytes,
             /* allocation of the next object, but to see this object    */              /* allocation of the next object, but to see this object    */
             /* still containing a free list pointer.  Otherwise the     */              /* still containing a free list pointer.  Otherwise the     */
             /* marker might find a random "mark descriptor".            */              /* marker might find a random "mark descriptor".            */
             *my_fl = obj_link(my_entry);              *(volatile ptr_t *)my_fl = obj_link(my_entry);
             *(void **)result = ptr_to_struct_containing_descr;              /* We must update the freelist before we store the pointer. */
               /* Otherwise a GC at this point would see a corrupted       */
               /* free list.                                               */
               /* A memory barrier is probably never needed, since the     */
               /* action of stopping this thread will cause prior writes   */
               /* to complete.                                             */
               GC_ASSERT(((void * volatile *)result)[1] == 0);
               *(void * volatile *)result = ptr_to_struct_containing_descr;
             return result;              return result;
         } else if ((word)my_entry - 1 < DIRECT_GRANULES) {          } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
             *my_fl = my_entry + index + 1;              if (!GC_incremental) *my_fl = my_entry + index + 1;
                   /* In the incremental case, we always have to take this */
                   /* path.  Thus we leave the counter alone.              */
             return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);              return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
         } else {          } else {
             my_entry = GC_generic_malloc_many(BYTES_FROM_INDEX(index),              GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
                                               GC_gcj_kind);              if (*my_fl == 0) return GC_oom_fn(bytes);
             *my_fl = my_entry;              return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
             if (my_entry == 0) return GC_oom_fn(bytes);  
             return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);  
         }          }
     }      }
 }  }
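The volatile stores above enforce an ordering invariant: the object's free-list link must be consumed (and the list head advanced) before the mark descriptor is written into that same first word, or a scanner could misread a leftover link as a descriptor. A minimal model of the publish-in-order pattern (descr and alloc_one are illustrative; as the comment notes, the GC relies on thread stopping rather than hardware barriers for visibility):

    #include <assert.h>

    static void *descr = &descr;           /* stand-in for the type descriptor */

    static void *alloc_one(void **freelist_ptr)
    {
        void *result = *freelist_ptr;
        /* 1. Unlink first: the object must stop looking like a free-list */
        /*    node before it starts looking like an allocated object.     */
        *(void * volatile *)freelist_ptr = *(void **)result;
        /* 2. Only now publish the descriptor in the object's first word. */
        *(void * volatile *)result = descr;
        return result;
    }

    int main(void)
    {
        void *obj_b[2] = { NULL, NULL };    /* last object: link is 0      */
        void *obj_a[2] = { obj_b, NULL };   /* first object links to obj_b */
        void *fl = obj_a;

        void *p = alloc_one(&fl);
        assert(p == (void *)obj_a);
        assert(fl == (void *)obj_b);        /* the list advanced first     */
        assert(*(void **)p == descr);       /* the descriptor came second  */
        return 0;
    }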
Line 415 / Line 464  GC_PTR GC_local_gcj_malloc(size_t bytes,
 # endif /* !THREAD_LOCAL_ALLOC */  # endif /* !THREAD_LOCAL_ALLOC */
   
 /*  /*
  * The only way to suspend threads given the pthread interface is to send   * We use signals to stop threads during GC.
  * signals.  We can't use SIGSTOP directly, because we need to get the   *
  * thread to save its stack pointer in the GC thread table before   * Suspended threads wait in signal handler for SIG_THR_RESTART.
  * suspending.  So we have to reserve a signal of our own for this.   * That's more portable than semaphores or condition variables.
  * This means we have to intercept client calls to change the signal mask.   * (We do use sem_post from a signal handler, but that should be portable.)
  * The linuxthreads package already uses SIGUSR1 and SIGUSR2,   *
  * so we need to reuse something else.  I chose SIGPWR.   * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
  * (Perhaps SIGUNUSED would be a better choice.)   * Note that we can't just stop a thread; we need it to save its stack
    * pointer(s) and acknowledge.
  */   */
 #ifndef SIG_SUSPEND  
 #  if defined(HPUX_THREADS) || defined(GC_OSF1_THREADS)  
 #   define SIG_SUSPEND _SIGRTMIN + 6  
 #  else  
 #   define SIG_SUSPEND SIGPWR  
 #  endif  
 #endif  
   
 #ifndef SIG_THR_RESTART  #ifndef SIG_THR_RESTART
 #  if defined(HPUX_THREADS) || defined(GC_OSF1_THREADS)  #  if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
 #   define SIG_THR_RESTART _SIGRTMIN + 5  #   define SIG_THR_RESTART _SIGRTMIN + 5
 #  else  #  else
 #   define SIG_THR_RESTART SIGXCPU  #   define SIG_THR_RESTART SIGXCPU
 #  endif  #  endif
 #endif  #endif
   
 /* SPARC/Linux doesn't properly define SIGPWR in <signal.h>.  
  * It is aliased to SIGLOST in asm/signal.h, though.            */  
 #if defined(SPARC) && !defined(SIGPWR)  
 #   define SIGPWR SIGLOST  
 #endif  
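A stripped-down sketch of the suspend/ack/restart handshake the comments above describe, using SIGUSR1/SIGUSR2 purely for illustration (the collector reserves SIGPWR/SIGXCPU or _SIGRTMIN-based signals precisely because LinuxThreads already claims the SIGUSR pair). The handler posts the ack semaphore, which is async-signal-safe, then parks in sigsuspend until the restart signal arrives:

    #include <pthread.h>
    #include <semaphore.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define SIG_SUSPEND     SIGUSR1   /* demo only: both free in this program */
    #define SIG_THR_RESTART SIGUSR2

    static sem_t ack_sem;
    static volatile sig_atomic_t restart_seen;

    static void suspend_handler(int sig)
    {
        sigset_t mask;
        (void)sig;
        /* A real collector records the thread's stack pointer here.      */
        sem_post(&ack_sem);                 /* async-signal-safe ack       */
        sigfillset(&mask);
        sigdelset(&mask, SIG_THR_RESTART);  /* wait only for the restart   */
        while (!restart_seen) sigsuspend(&mask);
        restart_seen = 0;
    }

    static void restart_handler(int sig) { (void)sig; restart_seen = 1; }

    static void *worker(void *arg) { (void)arg; for (;;) pause(); return NULL; }

    int main(void)
    {
        pthread_t t;
        struct sigaction act;
        memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);           /* block signals in the handlers */
        act.sa_handler = suspend_handler;
        sigaction(SIG_SUSPEND, &act, NULL);
        act.sa_handler = restart_handler;
        sigaction(SIG_THR_RESTART, &act, NULL);
        sem_init(&ack_sem, 0, 0);

        pthread_create(&t, NULL, worker, NULL);
        sleep(1);                           /* let the worker get going      */

        pthread_kill(t, SIG_SUSPEND);       /* "stop the world"              */
        sem_wait(&ack_sem);                 /* wait for the acknowledgement  */
        puts("worker suspended");
        pthread_kill(t, SIG_THR_RESTART);   /* let it run again              */
        puts("worker restarted");
        return 0;
    }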
   
 sem_t GC_suspend_ack_sem;  sem_t GC_suspend_ack_sem;
   
 #if !defined(HPUX_THREADS) && !defined(GC_OSF1_THREADS)  #if 0
 /*  /*
 To make sure that we're using LinuxThreads and not some other thread  To make sure that we're using LinuxThreads and not some other thread
 package, we generate a dummy reference to `pthread_kill_other_threads_np'  package, we generate a dummy reference to `pthread_kill_other_threads_np'
 (was `__pthread_initial_thread_bos' but that disappeared),  (was `__pthread_initial_thread_bos' but that disappeared),
 which is a symbol defined in LinuxThreads, but (hopefully) not in other  which is a symbol defined in LinuxThreads, but (hopefully) not in other
 thread packages.  thread packages.
   
   We no longer do this, since this code is now portable enough that it might
   actually work for something else.
 */  */
 void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;  void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
 #endif /* !HPUX_THREADS */  #endif /* 0 */
   
 #if defined(SPARC) || defined(IA64)  #if defined(SPARC) || defined(IA64)
   extern word GC_save_regs_in_stack();    extern word GC_save_regs_in_stack();
Line 520 / Line 560  static void start_mark_threads()
   
     if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))      if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
         ABORT("pthread_attr_setdetachstate failed");          ABORT("pthread_attr_setdetachstate failed");
   
   #   if defined(HPUX) || defined(GC_DGUX386_THREADS)
         /* Default stack size is usually too small: fix it. */
         /* Otherwise marker threads or GC may run out of    */
         /* space.                                           */
   #     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
         {
           size_t old_size;
           int code;
   
           if (pthread_attr_getstacksize(&attr, &old_size) != 0)
             ABORT("pthread_attr_getstacksize failed\n");
           if (old_size < MIN_STACK_SIZE) {
             if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
                     ABORT("pthread_attr_setstacksize failed\n");
           }
         }
   #   endif /* HPUX || GC_DGUX386_THREADS */
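A self-contained version of the stack-size fix above: query the attribute's default and raise it to a floor before creating the thread. The 64 KB floor here is illustrative only; the GC derives MIN_STACK_SIZE from HBLKSIZE, and some libraries report 0 for the default size:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_STACK_SIZE (64 * 1024)     /* illustrative floor only */

    static void *thread_fn(void *arg) { (void)arg; return NULL; }

    int main(void)
    {
        pthread_attr_t attr;
        size_t old_size = 0;
        pthread_t t;

        if (pthread_attr_init(&attr) != 0) abort();
        if (pthread_attr_getstacksize(&attr, &old_size) != 0) abort();
        printf("default stack size: %lu\n", (unsigned long)old_size);
        if (old_size < MIN_STACK_SIZE) {
            /* Raise the default, as the marker-thread code above does. */
            if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0) abort();
        }
        if (pthread_create(&t, &attr, thread_fn, NULL) != 0) abort();
        pthread_join(t, NULL);
        pthread_attr_destroy(&attr);
        return 0;
    }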
 #   ifdef CONDPRINT  #   ifdef CONDPRINT
       if (GC_print_stats) {        if (GC_print_stats) {
         GC_printf1("Starting %ld marker threads\n", GC_markers - 1);          GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
Line 654 / Line 712  void GC_push_thread_structures GC_PROTO((void))
     GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));      GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
 }  }
   
   #ifdef THREAD_LOCAL_ALLOC
   /* We must explicitly mark ptrfree and gcj free lists, since the free   */
   /* list links wouldn't otherwise be found.  We also set them in the     */
   /* normal free lists, since that involves touching less memory than if  */
   /* we scanned them normally.                                            */
   void GC_mark_thread_local_free_lists(void)
   {
       int i, j;
       GC_thread p;
       ptr_t q;
   
       for (i = 0; i < THREAD_TABLE_SZ; ++i) {
         for (p = GC_threads[i]; 0 != p; p = p -> next) {
           for (j = 1; j < NFREELISTS; ++j) {
             q = p -> ptrfree_freelists[j];
             if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
             q = p -> normal_freelists[j];
             if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
   #         ifdef GC_GCJ_SUPPORT
               q = p -> gcj_freelists[j];
               if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
   #         endif /* GC_GCJ_SUPPORT */
           }
         }
       }
   }
   #endif /* THREAD_LOCAL_ALLOC */
   
   static struct GC_Thread_Rep first_thread;
   
 /* Add a thread to GC_threads.  We assume it wasn't already there.      */  /* Add a thread to GC_threads.  We assume it wasn't already there.      */
 /* Caller holds allocation lock.                                        */  /* Caller holds allocation lock.                                        */
 GC_thread GC_new_thread(pthread_t id)  GC_thread GC_new_thread(pthread_t id)
 {  {
     int hv = ((word)id) % THREAD_TABLE_SZ;      int hv = ((word)id) % THREAD_TABLE_SZ;
     GC_thread result;      GC_thread result;
     static struct GC_Thread_Rep first_thread;  
     static GC_bool first_thread_used = FALSE;      static GC_bool first_thread_used = FALSE;
   
     if (!first_thread_used) {      if (!first_thread_used) {
Line 736 / Line 823  GC_thread GC_lookup_thread(pthread_t id)
     return(p);      return(p);
 }  }
   
   #ifdef HANDLE_FORK
   /* Remove all entries from the GC_threads table, except the     */
   /* one for the current thread.  We need to do this in the child */
   /* process after a fork(), since only the current thread        */
   /* survives in the child.                                       */
   void GC_remove_all_threads_but_me(void)
   {
       pthread_t self = pthread_self();
       int hv;
       GC_thread p, next, me;
   
       for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
         me = 0;
         for (p = GC_threads[hv]; 0 != p; p = next) {
           next = p -> next;
           if (p -> id == self) {
             me = p;
             p -> next = 0;
           } else {
   #         ifdef THREAD_LOCAL_ALLOC
               if (!(p -> flags & FINISHED)) {
                 GC_destroy_thread_local(p);
               }
   #         endif /* THREAD_LOCAL_ALLOC */
             if (p != &first_thread) GC_INTERNAL_FREE(p);
           }
         }
         GC_threads[hv] = me;
       }
   }
   #endif /* HANDLE_FORK */
   
  /* There seems to be a very rare thread stopping problem.  To help us */
  /* debug it, we save the id and pid of the stopping thread.           */
   pthread_t GC_stopping_thread;
   int GC_stopping_pid;
   
 /* Caller holds allocation lock.        */  /* Caller holds allocation lock.        */
 void GC_stop_world()  void GC_stop_world()
 {  {
Line 745 / Line 869  void GC_stop_world()
     register int n_live_threads = 0;      register int n_live_threads = 0;
     register int result;      register int result;
   
       GC_stopping_thread = my_thread;    /* debugging only.      */
       GC_stopping_pid = getpid();                /* debugging only.      */
     /* Make sure all free list construction has stopped before we start. */      /* Make sure all free list construction has stopped before we start. */
     /* No new construction can start, since free list construction is   */      /* No new construction can start, since free list construction is   */
     /* required to acquire and release the GC lock before it starts,    */      /* required to acquire and release the GC lock before it starts,    */
Line 787 / Line 913  void GC_stop_world()
     #if DEBUG_THREADS      #if DEBUG_THREADS
       GC_printf1("World stopped 0x%x\n", pthread_self());        GC_printf1("World stopped 0x%x\n", pthread_self());
     #endif      #endif
       GC_stopping_thread = 0;  /* debugging only */
 }  }
   
 /* Caller holds allocation lock, and has held it continuously since     */  /* Caller holds allocation lock, and has held it continuously since     */
Line 923  int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
Line 1050  int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
 }  }
 #endif /* USE_PROC_FOR_LIBRARIES */  #endif /* USE_PROC_FOR_LIBRARIES */
   
 #ifdef LINUX_THREADS  #ifdef GC_LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined. */  /* Return the number of processors, or <= 0 if it can't be determined. */
 int GC_get_nprocs()  int GC_get_nprocs()
 {  {
Line 957  int GC_get_nprocs()
Line 1084  int GC_get_nprocs()
             if (cpu_no >= result) result = cpu_no + 1;              if (cpu_no >= result) result = cpu_no + 1;
         }          }
     }      }
       close(f);
     return result;      return result;
 }  }
 #endif /* LINUX_THREADS */  #endif /* GC_LINUX_THREADS */
   
   /* We hold the GC lock.  Wait until an in-progress GC has finished.     */
   /* Repeatedly RELEASES GC LOCK in order to wait.                        */
   /* If wait_for_all is true, then we exit with the GC lock held and no   */
   /* collection in progress; otherwise we just wait for the current GC    */
   /* to finish.                                                           */
   void GC_wait_for_gc_completion(GC_bool wait_for_all)
   {
       if (GC_incremental && GC_collection_in_progress()) {
           int old_gc_no = GC_gc_no;
   
           /* Make sure that no part of our stack is still on the mark stack, */
           /* since it's about to be unmapped.                                */
           while (GC_incremental && GC_collection_in_progress()
                  && (wait_for_all || old_gc_no == GC_gc_no)) {
               ENTER_GC();
               GC_collect_a_little_inner(1);
               EXIT_GC();
               UNLOCK();
               sched_yield();
               LOCK();
           }
       }
   }
   
   #ifdef HANDLE_FORK
   /* Procedures called before and after a fork.  The goal here is to make */
  /* it safe to call GC_malloc() in a forked child.  It's unclear        */
  /* whether that is attainable, since the single UNIX spec seems to     */
  /* imply that one should only call async-signal-safe functions, and    */
  /* we probably can't quite guarantee that.  But we give it our best    */
  /* shot.  (That same spec also implies that it's not safe to call the  */
  /* system malloc between fork() and exec().  Thus we're doing no       */
  /* worse than it.)                                                     */
   
   /* Called before a fork()               */
   void GC_fork_prepare_proc(void)
   {
       /* Acquire all relevant locks, so that after releasing the locks    */
       /* the child will see a consistent state in which monitor           */
       /* invariants hold.  Unfortunately, we can't acquire libc locks     */
       /* we might need, and there seems to be no guarantee that libc      */
       /* must install a suitable fork handler.                            */
       /* Wait for an ongoing GC to finish, since we can't finish it in    */
       /* the (one remaining thread in) the child.                         */
         LOCK();
   #     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
           GC_wait_for_reclaim();
   #     endif
         GC_wait_for_gc_completion(TRUE);
   #     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
           GC_acquire_mark_lock();
   #     endif
   }
   
   /* Called in parent after a fork()      */
   void GC_fork_parent_proc(void)
   {
   #   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
         GC_release_mark_lock();
   #   endif
       UNLOCK();
   }
   
   /* Called in child after a fork()       */
   void GC_fork_child_proc(void)
   {
       /* Clean up the thread table, so that just our thread is left. */
   #   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
         GC_release_mark_lock();
   #   endif
       GC_remove_all_threads_but_me();
   #   ifdef PARALLEL_MARK
         /* Turn off parallel marking in the child, since we are probably  */
         /* just going to exec, and we would have to restart mark threads. */
           GC_markers = 1;
           GC_parallel = FALSE;
   #   endif /* PARALLEL_MARK */
       UNLOCK();
   }
   #endif /* HANDLE_FORK */
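For reference, the three handlers above are registered with pthread_atfork(prepare, parent, child), which the diff does in GC_thr_init below. A minimal sketch of the same discipline, with one mutex standing in for the allocation lock: the prepare handler acquires it, and both post-fork handlers release it, so the child never inherits the lock mid-update:

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hold the lock across fork() so the child never sees it mid-update. */
    static void prepare(void) { pthread_mutex_lock(&alloc_lock); }
    static void parent(void)  { pthread_mutex_unlock(&alloc_lock); }
    static void child(void)   { pthread_mutex_unlock(&alloc_lock); }

    int main(void)
    {
        pthread_atfork(prepare, parent, child);
        pid_t pid = fork();
        if (pid == 0) {
            /* The lock is free and its data consistent in the child. */
            pthread_mutex_lock(&alloc_lock);
            puts("child: lock acquired");
            pthread_mutex_unlock(&alloc_lock);
            _exit(0);
        }
        puts("parent: fork done");
        return 0;
    }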
   
   #if defined(GC_DGUX386_THREADS)
  /* Return the number of processors, or <= 0 if it can't be determined. */
   int GC_get_nprocs()
   {
       /* <takis@XFree86.Org> */
       int numCpus;
       struct dg_sys_info_pm_info pm_sysinfo;
       int status =0;
   
       status = dg_sys_info((long int *) &pm_sysinfo,
           DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
       if (status < 0)
          /* set -1 for error */
          numCpus = -1;
       else
         /* Active CPUs */
         numCpus = pm_sysinfo.idle_vp_count;
   
   #  ifdef DEBUG_THREADS
       GC_printf1("Number of active CPUs in this system: %d\n", numCpus);
   #  endif
       return(numCpus);
   }
   #endif /* GC_DGUX386_THREADS */
   
 /* We hold the allocation lock. */  /* We hold the allocation lock. */
 void GC_thr_init()  void GC_thr_init()
 {  {
Line 997 / Line 1230  void GC_thr_init()
     if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {      if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
         ABORT("Cannot set SIG_THR_RESTART handler");          ABORT("Cannot set SIG_THR_RESTART handler");
     }      }
 #   ifdef INSTALL_LOOPING_SEGV_HANDLER  #   ifdef HANDLE_FORK
         act.sa_handler = GC_looping_handler;        /* Prepare for a possible fork.   */
         if (sigaction(SIGSEGV, &act, NULL) != 0          pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
             || sigaction(SIGBUS, &act, NULL) != 0) {                         GC_fork_child_proc);
             ABORT("Cannot set SIGSEGV or SIGBUS looping handler");  #   endif /* HANDLE_FORK */
         }  
 #   endif  /* INSTALL_LOOPING_SEGV_HANDLER */  
   
     /* Add the initial thread, so we can stop it.       */      /* Add the initial thread, so we can stop it.       */
       t = GC_new_thread(pthread_self());        t = GC_new_thread(pthread_self());
       t -> stack_ptr = (ptr_t)(&dummy);        t -> stack_ptr = (ptr_t)(&dummy);
Line 1017 / Line 1247  void GC_thr_init()
         if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);          if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
       }        }
       if (GC_nprocs <= 0) {        if (GC_nprocs <= 0) {
 #       if defined(HPUX_THREADS)  #       if defined(GC_HPUX_THREADS)
           GC_nprocs = pthread_num_processors_np();            GC_nprocs = pthread_num_processors_np();
 #       endif  #       endif
 #       if defined(OSF1_THREADS)  #       if defined(GC_OSF1_THREADS) || defined(GC_FREEBSD_THREADS)
           GC_nprocs = 1;            GC_nprocs = 1;
 #       endif  #       endif
 #       ifdef LINUX_THREADS  #       if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
           GC_nprocs = GC_get_nprocs();            GC_nprocs = GC_get_nprocs();
 #       endif  #       endif
       }        }
Line 1035 / Line 1265  void GC_thr_init()
 #       endif  #       endif
       } else {        } else {
 #       ifdef PARALLEL_MARK  #       ifdef PARALLEL_MARK
           GC_markers = GC_nprocs;            {
               char * markers_string = GETENV("GC_MARKERS");
               if (markers_string != NULL) {
                 GC_markers = atoi(markers_string);
               } else {
                 GC_markers = GC_nprocs;
               }
             }
 #       endif  #       endif
       }        }
 #   ifdef PARALLEL_MARK  #   ifdef PARALLEL_MARK
Line 1054 / Line 1291  void GC_thr_init()
 #       endif  #       endif
       } else {        } else {
         GC_parallel = TRUE;          GC_parallel = TRUE;
           /* Disable true incremental collection, but generational is OK. */
           GC_time_limit = GC_TIME_UNLIMITED;
       }        }
 #   endif  #   endif
 }  }
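The new GC_MARKERS knob follows the same precedence as GC_NPROCS: an explicit environment override wins, otherwise the detected processor count is used. A tiny sketch of that precedence, modeling GETENV with plain getenv (choose_markers is an illustrative name):

    #include <stdio.h>
    #include <stdlib.h>

    /* Precedence used above: explicit override, else the detected count. */
    static int choose_markers(int detected_nprocs)
    {
        const char *s = getenv("GC_MARKERS");   /* GETENV() in the GC source */
        if (s != NULL) return atoi(s);
        return detected_nprocs;
    }

    int main(void)
    {
        printf("markers: %d\n", choose_markers(4 /* pretend 4 CPUs found */));
        return 0;
    }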
Line 1061 / Line 1300  void GC_thr_init()
   
 /* Perform all initializations, including those that    */  /* Perform all initializations, including those that    */
 /* may require allocation.                              */  /* may require allocation.                              */
 /* Called as constructor without allocation lock.       */  /* Called without allocation lock.                      */
 /* Must be called before a second thread is created.    */  /* Must be called before a second thread is created.    */
 void GC_full_init()  /* Called without allocation lock.                      */
   void GC_init_parallel()
 {  {
     if (fully_initialized) return;      if (parallel_initialized) return;
       parallel_initialized = TRUE;
           /* GC_init() calls us back, so set flag first.  */
     if (!GC_is_initialized) GC_init();      if (!GC_is_initialized) GC_init();
     /* If we are using a parallel marker, start the helper threads.  */      /* If we are using a parallel marker, start the helper threads.  */
 #     ifdef PARALLEL_MARK  #     ifdef PARALLEL_MARK
Line 1077 / Line 1319  void GC_full_init()
       GC_init_thread_local(GC_lookup_thread(pthread_self()));        GC_init_thread_local(GC_lookup_thread(pthread_self()));
       UNLOCK();        UNLOCK();
 #   endif  #   endif
     fully_initialized = TRUE;  
 }  }
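Note the new comment "GC_init() calls us back, so set flag first": setting parallel_initialized before calling GC_init is what breaks the mutual recursion between the two init routines. A minimal model of the pattern:

    #include <stdio.h>

    static int parallel_initialized = 0;
    static int gc_initialized = 0;

    static void init_parallel(void);

    static void gc_init(void)
    {
        if (gc_initialized) return;
        gc_initialized = 1;
        init_parallel();            /* GC_init() calls GC_init_parallel() back */
    }

    static void init_parallel(void)
    {
        if (parallel_initialized) return;
        parallel_initialized = 1;   /* set BEFORE calling back, or we recurse  */
        if (!gc_initialized) gc_init();
        puts("parallel state initialized once");
    }

    int main(void)
    {
        init_parallel();
        init_parallel();            /* the second call is a no-op */
        return 0;
    }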
   
   
Line 1132 / Line 1373  GC_end_blocking(void) {
     UNLOCK();      UNLOCK();
 }  }
   
   #if defined(GC_DGUX386_THREADS)
   #define __d10_sleep sleep
   #endif /* GC_DGUX386_THREADS */
   
 /* A wrapper for the standard C sleep function  */  /* A wrapper for the standard C sleep function  */
 int WRAP_FUNC(sleep) (unsigned int seconds)  int WRAP_FUNC(sleep) (unsigned int seconds)
 {  {
Line 1172 / Line 1417  void GC_thread_exit_proc(void *arg)
        && !defined(USE_HPUX_TLS) && !defined(DBG_HDRS_ALL)         && !defined(USE_HPUX_TLS) && !defined(DBG_HDRS_ALL)
       GC_remove_specific(GC_thread_key);        GC_remove_specific(GC_thread_key);
 #   endif  #   endif
     if (GC_incremental && GC_collection_in_progress()) {      GC_wait_for_gc_completion(FALSE);
         int old_gc_no = GC_gc_no;  
   
         /* Make sure that no part of our stack is still on the mark stack, */  
         /* since it's about to be unmapped.                                */  
         while (GC_incremental && GC_collection_in_progress()  
                && old_gc_no == GC_gc_no) {  
             ENTER_GC();  
             GC_collect_a_little_inner(1);  
             EXIT_GC();  
             UNLOCK();  
             sched_yield();  
             LOCK();  
         }  
     }  
     UNLOCK();      UNLOCK();
 }  }
   
Line 1201 / Line 1432  int WRAP_FUNC(pthread_join)(pthread_t thread, void **r
    /* can't have been recycled by pthreads.                            */      /* can't have been recycled by pthreads.                            */
     UNLOCK();      UNLOCK();
     result = REAL_FUNC(pthread_join)(thread, retval);      result = REAL_FUNC(pthread_join)(thread, retval);
   # if defined (GC_FREEBSD_THREADS)
       /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
          appears to be) a spurious EINTR which caused the test and real code
          to gratuitously fail.  Having looked at system pthread library source
          code, I see how this return code may be generated.  In one path of
          code, pthread_join() just returns the errno setting of the thread
          being joined.  This does not match the POSIX specification or the
          local man pages thus I have taken the liberty to catch this one
          spurious return value properly conditionalized on GC_FREEBSD_THREADS. */
       if (result == EINTR) result = 0;
   # endif
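An equivalent way to package the workaround is a small join wrapper that applies the same EINTR-to-success mapping under the same conditional. A sketch (join_no_eintr is an illustrative name, not the GC's):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static void *fn(void *arg) { return arg; }

    /* Join that tolerates the spurious EINTR described above. */
    static int join_no_eintr(pthread_t t, void **retval)
    {
        int result = pthread_join(t, retval);
    #ifdef GC_FREEBSD_THREADS
        if (result == EINTR) result = 0;    /* the same mapping the diff adds */
    #endif
        return result;
    }

    int main(void)
    {
        pthread_t t;
        void *rv;
        static int answer = 42;
        pthread_create(&t, NULL, fn, &answer);
        if (join_no_eintr(t, &rv) == 0) printf("joined, rv=%d\n", *(int *)rv);
        return 0;
    }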
     if (result == 0) {      if (result == 0) {
         LOCK();          LOCK();
         /* Here the pthread thread id may have been recycled. */          /* Here the pthread thread id may have been recycled. */
Line 1315 / Line 1557  WRAP_FUNC(pthread_create)(pthread_t *new_thread,
         /* This is otherwise saved only in an area mmapped by the thread */          /* This is otherwise saved only in an area mmapped by the thread */
         /* library, which isn't visible to the collector.                */          /* library, which isn't visible to the collector.                */
   
       /* We resist the temptation to muck with the stack size here,       */
       /* even if the default is unreasonably small.  That's the client's  */
       /* responsibility.                                                  */
   
     LOCK();      LOCK();
     si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info), NORMAL);      si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                    NORMAL);
     UNLOCK();      UNLOCK();
     if (!fully_initialized) GC_full_init();      if (!parallel_initialized) GC_init_parallel();
     if (0 == si) return(ENOMEM);      if (0 == si) return(ENOMEM);
     sem_init(&(si -> registered), 0, 0);      sem_init(&(si -> registered), 0, 0);
     si -> start_routine = start_routine;      si -> start_routine = start_routine;
Line 1327 / Line 1574  WRAP_FUNC(pthread_create)(pthread_t *new_thread,
     if (!GC_thr_initialized) GC_thr_init();      if (!GC_thr_initialized) GC_thr_init();
     if (NULL == attr) {      if (NULL == attr) {
         detachstate = PTHREAD_CREATE_JOINABLE;          detachstate = PTHREAD_CREATE_JOINABLE;
     } else {      } else {
         pthread_attr_getdetachstate(attr, &detachstate);          pthread_attr_getdetachstate(attr, &detachstate);
     }      }
     if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;      if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
Line 1337 / Line 1584  WRAP_FUNC(pthread_create)(pthread_t *new_thread,
         GC_printf1("About to start new thread from thread 0x%X\n",          GC_printf1("About to start new thread from thread 0x%X\n",
                    pthread_self());                     pthread_self());
 #   endif  #   endif
   
     result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);      result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
 #   ifdef DEBUG_THREADS  #   ifdef DEBUG_THREADS
         GC_printf1("Started thread 0x%X\n", *new_thread);          GC_printf1("Started thread 0x%X\n", *new_thread);
Line 1352 / Line 1600  WRAP_FUNC(pthread_create)(pthread_t *new_thread,
         LOCK();          LOCK();
         GC_INTERNAL_FREE(si);          GC_INTERNAL_FREE(si);
         UNLOCK();          UNLOCK();
   
     return(result);      return(result);
 }  }
   
Line 1530 / Line 1779  void GC_lock()
   
 #endif /* !USE_SPINLOCK */  #endif /* !USE_SPINLOCK */
   
 #ifdef PARALLEL_MARK  #if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
   
 #ifdef GC_ASSERTIONS  #ifdef GC_ASSERTIONS
   pthread_t GC_mark_lock_holder = NO_THREAD;    pthread_t GC_mark_lock_holder = NO_THREAD;
 #endif  #endif
   
 #ifdef IA64  #if 0
   /* Ugly workaround for a linux threads bug in the final versions      */    /* Ugly workaround for a linux threads bug in the final versions      */
   /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner           */    /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner           */
   /* field even when it fails to acquire the mutex.  This causes        */    /* field even when it fails to acquire the mutex.  This causes        */
Line 1550 / Line 1799  void GC_lock()
   static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;    static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
 #endif  #endif
   
 static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;  
   
 static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;  static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
   
 void GC_acquire_mark_lock()  void GC_acquire_mark_lock()
Line 1578 / Line 1825  void GC_release_mark_lock()
     }      }
 }  }
   
void GC_wait_marker()  /* Collector must wait for freelist builders for 2 reasons:             */
   /* 1) Mark bits may still be getting examined without lock.             */
   /* 2) Partial free lists referenced only by locals may not be scanned   */
   /*    correctly, e.g. if they contain "pointer-free" objects, since the */
   /*    free-list link may be ignored.                                    */
   void GC_wait_builder()
 {  {
     GC_ASSERT(GC_mark_lock_holder == pthread_self());      GC_ASSERT(GC_mark_lock_holder == pthread_self());
 #   ifdef GC_ASSERTIONS  #   ifdef GC_ASSERTIONS
         GC_mark_lock_holder = NO_THREAD;          GC_mark_lock_holder = NO_THREAD;
 #   endif  #   endif
     if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {      if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
         ABORT("pthread_cond_wait failed");          ABORT("pthread_cond_wait failed");
     }      }
     GC_ASSERT(GC_mark_lock_holder == NO_THREAD);      GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
Line 1593 / Line 1845  void GC_wait_marker()
 #   endif  #   endif
 }  }
   
 void GC_wait_builder()  void GC_wait_for_reclaim()
 {  {
       GC_acquire_mark_lock();
       while (GC_fl_builder_count > 0) {
           GC_wait_builder();
       }
       GC_release_mark_lock();
   }
   
   void GC_notify_all_builder()
   {
     GC_ASSERT(GC_mark_lock_holder == pthread_self());      GC_ASSERT(GC_mark_lock_holder == pthread_self());
       if (pthread_cond_broadcast(&builder_cv) != 0) {
           ABORT("pthread_cond_broadcast failed");
       }
   }
   
   #endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
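GC_wait_for_reclaim above is the textbook condition-variable idiom: re-test the predicate (GC_fl_builder_count > 0) in a loop around the wait, with GC_notify_all_builder broadcasting on the other side. A self-contained model of the same handshake:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  builder_cv = PTHREAD_COND_INITIALIZER;
    static int fl_builder_count = 2;        /* GC_fl_builder_count analogue */

    static void *builder(void *arg)
    {
        (void)arg;
        usleep(10000);                      /* pretend to build a free list */
        pthread_mutex_lock(&mark_mutex);
        if (--fl_builder_count == 0)
            pthread_cond_broadcast(&builder_cv);   /* GC_notify_all_builder */
        pthread_mutex_unlock(&mark_mutex);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, builder, NULL);
        pthread_create(&t2, NULL, builder, NULL);

        /* GC_wait_for_reclaim(): loop on the predicate around the wait. */
        pthread_mutex_lock(&mark_mutex);
        while (fl_builder_count > 0)
            pthread_cond_wait(&builder_cv, &mark_mutex);
        pthread_mutex_unlock(&mark_mutex);

        puts("all free-list builders finished");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }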
   
   #ifdef PARALLEL_MARK
   
   static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
   
   void GC_wait_marker()
   {
       GC_ASSERT(GC_mark_lock_holder == pthread_self());
 #   ifdef GC_ASSERTIONS  #   ifdef GC_ASSERTIONS
         GC_mark_lock_holder = NO_THREAD;          GC_mark_lock_holder = NO_THREAD;
 #   endif  #   endif
     if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {      if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
         ABORT("pthread_cond_wait failed");          ABORT("pthread_cond_wait failed");
     }      }
     GC_ASSERT(GC_mark_lock_holder == NO_THREAD);      GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
Line 1615 / Line 1890  void GC_notify_all_marker()
     }      }
 }  }
   
 void GC_notify_all_builder()  
 {  
     GC_ASSERT(GC_mark_lock_holder == pthread_self());  
     if (pthread_cond_broadcast(&builder_cv) != 0) {  
         ABORT("pthread_cond_broadcast failed");  
     }  
 }  
   
 void GC_wait_for_reclaim()  
 {  
     GC_acquire_mark_lock();  
     while (GC_fl_builder_count > 0) {  
         GC_wait_builder();  
     }  
     GC_release_mark_lock();  
 }  
 #endif /* PARALLEL_MARK */  #endif /* PARALLEL_MARK */
   
 # endif /* LINUX_THREADS */  # endif /* GC_LINUX_THREADS and friends */
   

Legend: left column = removed from v.1.5; right column = added in v.1.6; unchanged lines appear in both columns. "Line N / Line M" markers give a hunk's position in v.1.5 and v.1.6 respectively.
