
OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h (revision 1.3)

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_LOCKS_H
#define GC_LOCKS_H

/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS.
 * (There is currently no separate equivalent for FASTLOCK.)
 *
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
 */
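
/* An illustrative sketch (not part of the original header) of the      */
/* calling pattern the above implies, modeled on the allocator's fast   */
/* path; "do_work" stands in for the guarded code:                      */
#if 0
   {
     DCL_LOCK_STATE;

     FASTLOCK();
     if (FASTLOCK_SUCCEEDED()) {
       /* ... do_work: a few instructions, not restartable ... */
       FASTUNLOCK();
     } else {
       FASTUNLOCK();    /* called whether or not the lock was acquired */
       LOCK();          /* fall back to the full allocation lock       */
       /* ... do_work ... */
       UNLOCK();
     }
   }
#endif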
# ifdef THREADS
   void GC_noop1 GC_PROTO((word));
#  ifdef PCR_OBSOLETE   /* Faster, but broken with multiple lwp's       */
#    include  "th/PCR_Th.h"
#    include  "th/PCR_ThCrSec.h"
     extern struct PCR_Th_MLRep GC_allocate_ml;
#    define DCL_LOCK_STATE  PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() PCR_ThCrSec_EnterSys()
     /* Here we cheat (a lot): */
#        define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
                /* TRUE if nobody currently holds the lock */
#    define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#  endif
#  ifdef PCR
#    include <base/PCR_Base.h>
#    include <th/PCR_Th.h>
     extern PCR_Th_ML GC_allocate_ml;
#    define DCL_LOCK_STATE \
         PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#    define FASTUNLOCK()  {\
         if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#  endif
#  ifdef SRC_M3
     extern GC_word RT0u__inCritical;
#    define LOCK() RT0u__inCritical++
#    define UNLOCK() RT0u__inCritical--
#  endif
#  ifdef GC_SOLARIS_THREADS
#    include <thread.h>
#    include <signal.h>
     extern mutex_t GC_allocate_ml;
#    define LOCK() mutex_lock(&GC_allocate_ml);
#    define UNLOCK() mutex_unlock(&GC_allocate_ml);
#  endif

/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
/* acquisition and release.  We need this for correct operation of the */
/* incremental GC.                                                     */
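
/* An illustrative sketch (not in the original header) of how the pair  */
/* is intended to be used: GC_test_and_set returns the previous value,  */
/* so a nonzero result means someone else already holds the lock.       */
#if 0
   static volatile unsigned int example_lock = 0;  /* hypothetical */

   static void example_critical_section(void)
   {
     while (GC_test_and_set(&example_lock)) {
       /* spin; production code backs off or yields (cf. GC_lock()) */
     }
     /* ... critical section ... */
     GC_clear(&example_lock);     /* release, with any needed barrier */
   }
#endif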
#  ifdef __GNUC__
#    if defined(I386)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;
         /* Note: the "xchg" instruction does not need a "lock" prefix */
         __asm__ __volatile__("xchgl %0, %1"
                : "=r"(oldval), "=m"(*(addr))
                : "0"(1), "m"(*(addr)) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(IA64)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         long oldval, n = 1;
         __asm__ __volatile__("xchg4 %0=%1,%2"
                : "=r"(oldval), "=m"(*addr)
                : "r"(n), "1"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
       /* Should this handle post-increment addressing?? */
       inline static void GC_clear(volatile unsigned int *addr) {
         __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
       }
#      define GC_CLEAR_DEFINED
#    endif
#    ifdef SPARC
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;

         __asm__ __volatile__("ldstub %1,%0"
                : "=r"(oldval), "=m"(*addr)
                : "m"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    ifdef M68K
       /* Contributed by Tony Mantler.  I'm not sure how well it was   */
       /* tested.                                                      */
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          char oldval; /* this must be no longer than 8 bits */

          /* The return value is semi-phony. */
          /* 'tas' sets bit 7 while the return */
          /* value pretends bit 0 was set */
          __asm__ __volatile__(
                 "tas %1@; sne %0; negb %0"
                 : "=d" (oldval)
                 : "a" (addr) : "memory");
          return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(POWERPC)
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          int temp = 1; /* locked value */

          /* Constraints cleaned up: the original tied addr to both an */
          /* output "=p" and an input, and omitted the cr0 clobber     */
          /* that cmpwi and stwcx. require.                            */
          __asm__ __volatile__(
               "1:\tlwarx %0,0,%1\n"   /* load and reserve               */
               "\tcmpwi %0, 0\n"       /* if load is                     */
               "\tbne 2f\n"            /*   non-zero, return already set */
               "\tstwcx. %2,0,%1\n"    /* else store conditional         */
               "\tbne- 1b\n"           /* retry if lost reservation      */
               "2:\t\n"                /* oldval is zero if we set       */
              : "=&r"(oldval)
              : "r"(addr), "r"(temp)
              : "cr0", "memory");
          return oldval;
        }
#       define GC_TEST_AND_SET_DEFINED
        inline static void GC_clear(volatile unsigned int *addr) {
          __asm__ __volatile__("eieio" : : : "memory");
          *(addr) = 0;
        }
#       define GC_CLEAR_DEFINED
#    endif
#    if defined(ALPHA)
        inline static int GC_test_and_set(volatile unsigned int * addr)
        {
          unsigned long oldvalue;
          unsigned long temp;

          __asm__ __volatile__(
                             "1:     ldl_l %0,%1\n"
                             "       and %0,%3,%2\n"
                             "       bne %2,2f\n"
                             "       xor %0,%3,%0\n"
                             "       stl_c %0,%1\n"
                             "       beq %0,3f\n"
                             "       mb\n"
                             "2:\n"
                             ".section .text2,\"ax\"\n"
                             "3:     br 1b\n"
                             ".previous"
                             : "=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                             : "Ir" (1), "m" (*addr)
                             : "memory");

          return oldvalue;
        }
#       define GC_TEST_AND_SET_DEFINED
        /* Should probably also define GC_clear, since releasing the   */
        /* lock needs a memory barrier on Alpha.                       */
#    endif /* ALPHA */
#    ifdef ARM32
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          /* SWP on ARM is very similar to XCHG on x86.  Doesn't lock the
           * bus because there are no SMP ARM machines.  If/when there are,
           * this code will likely need to be updated. */
          /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
          __asm__ __volatile__("swp %0, %1, [%2]"
                             : "=r"(oldval)
                             : "r"(1), "r"(addr)
                             : "memory");
          return oldval;
        }
#       define GC_TEST_AND_SET_DEFINED
#    endif /* ARM32 */
#  endif /* __GNUC__ */
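
/* Not in the original header: on GCC 4.1 and newer one could define   */
/* both operations portably with the __sync builtins instead of        */
/* per-architecture assembly.  A sketch:                               */
#if 0
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       /* Atomically store 1 and return the previous value     */
       /* (an acquire barrier is implied by the builtin).      */
       return (int)__sync_lock_test_and_set(addr, 1);
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
       __sync_lock_release(addr);   /* stores 0 with release semantics */
     }
#    define GC_CLEAR_DEFINED
#endif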
#  if (defined(ALPHA) && !defined(__GNUC__))
#    define GC_test_and_set(addr) __cxx_test_and_set_atomic(addr, 1)
#    define GC_TEST_AND_SET_DEFINED
#  endif
#  if defined(MSWIN32)
#    define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#    define GC_TEST_AND_SET_DEFINED
#  endif
#  ifdef MIPS
#    ifdef LINUX
#      include <sys/tas.h>
#      define GC_test_and_set(addr) _test_and_set((int *) addr,1)
#      define GC_TEST_AND_SET_DEFINED
#    elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
        || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#       ifdef __GNUC__
#          define GC_test_and_set(addr) _test_and_set(addr,1)
#       else
#          define GC_test_and_set(addr) test_and_set(addr,1)
#       endif
#    else
#       define GC_test_and_set(addr) __test_and_set(addr,1)
#       define GC_clear(addr) __lock_release(addr);
#       define GC_CLEAR_DEFINED
#    endif
#    define GC_TEST_AND_SET_DEFINED
#  endif /* MIPS */
#  if 0 /* defined(HP_PA) */
     /* The official recommendation seems to be to not use ldcw from   */
     /* user mode.  Since multithreaded incremental collection doesn't */
     /* work anyway on HP_PA, this shouldn't be a major loss.          */

     /* "set" means 0 and "clear" means 1 here.                */
#    define GC_test_and_set(addr) !GC_test_and_clear(addr)
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
        /* The above needs a memory barrier! */
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#    ifdef __GNUC__
       inline static void GC_clear(volatile unsigned int *addr) {
         /* Try to discourage gcc from moving anything past this. */
         __asm__ __volatile__(" " : : : "memory");
         *(addr) = 0;
       }
#    else
           /* The function call in the following should prevent the    */
           /* compiler from moving assignments to below the UNLOCK.    */
#      define GC_clear(addr) GC_noop1((word)(addr)); \
                             *((volatile unsigned int *)(addr)) = 0;
#    endif
#    define GC_CLEAR_DEFINED
#  endif /* !GC_CLEAR_DEFINED */

#  if !defined(GC_TEST_AND_SET_DEFINED)
#    define USE_PTHREAD_LOCKS
#  endif

#  if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
      && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
#    define NO_THREAD (pthread_t)(-1)
#    include <pthread.h>
#    if defined(PARALLEL_MARK)
      /* We need compare-and-swap to update mark bits, where it's      */
      /* performance critical.  If USE_MARK_BYTES is defined, it is    */
      /* no longer needed for this purpose.  However we use it in      */
      /* either case to implement atomic fetch-and-add, though that's  */
      /* less performance critical, and could perhaps be done with     */
      /* a lock.                                                       */
#     if defined(GENERIC_COMPARE_AND_SWAP)
        /* Probably not useful, except for debugging.   */
        /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we       */
        /* minimize its use.                                           */
        extern pthread_mutex_t GC_compare_and_swap_lock;

        /* Note that if GC_word updates are not atomic, a concurrent   */
        /* reader should acquire GC_compare_and_swap_lock.  On         */
        /* currently supported platforms, such updates are atomic.     */
        extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                               GC_word old, GC_word new_val);
#     endif /* GENERIC_COMPARE_AND_SWAP */
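        /* The mutex-based definition lives in the library source, not */
        /* here.  As an illustrative sketch of what such a fallback    */
        /* amounts to (for illustration only):                         */
#       if 0
          GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                          GC_word old, GC_word new_val)
          {
            GC_bool result;

            pthread_mutex_lock(&GC_compare_and_swap_lock);
            if (*addr == old) {
              *addr = new_val;
              result = TRUE;
            } else {
              result = FALSE;
            }
            pthread_mutex_unlock(&GC_compare_and_swap_lock);
            return result;
          }
#       endif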
#     if defined(I386)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         /* Returns TRUE if the comparison succeeded. */
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           char result;
           __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                : "=m"(*(addr)), "=r"(result)
                : "r" (new_val), "0"(*(addr)), "a"(old) : "memory");
           return (GC_bool) result;
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_write_barrier()
       {
         /* We believe the processor ensures at least processor */
         /* consistent ordering.  Thus a compiler barrier       */
         /* should suffice.                                     */
         __asm__ __volatile__("" : : : "memory");
       }
#     endif /* I386 */
#     if defined(IA64)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old, GC_word new_val)
         {
           unsigned long oldval;
           __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
                : "=r"(oldval), "=m"(*addr)
                : "r"(new_val), "1"(*addr), "r"(old) : "memory");
           return (oldval == old);
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      if 0
        /* Shouldn't be needed; we use volatile stores instead. */
        inline static void GC_memory_write_barrier()
        {
          __asm__ __volatile__("mf" : : : "memory");
        }
#      endif /* 0 */
#     endif /* IA64 */
#     if !defined(GENERIC_COMPARE_AND_SWAP)
        /* Returns the original value of *addr.        */
        inline static GC_word GC_atomic_add(volatile GC_word *addr,
                                            GC_word how_much)
        {
          GC_word old;
          do {
            old = *addr;
          } while (!GC_compare_and_exchange(addr, old, old+how_much));
          return old;
        }
#     else /* GENERIC_COMPARE_AND_SWAP */
        /* So long as a GC_word can be atomically updated, it should   */
        /* be OK to read *addr without a lock.                         */
        extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
#     endif /* GENERIC_COMPARE_AND_SWAP */

#    endif /* PARALLEL_MARK */

#    if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
      /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to   */
      /* be held for long periods, if it is held at all.  Thus spinning */
      /* and sleeping for fixed periods are likely to result in         */
      /* significant wasted time.  We thus rely mostly on queued locks. */
#     define USE_SPIN_LOCK
      extern volatile unsigned int GC_allocate_lock;
      extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#     ifdef GC_ASSERTIONS
#        define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
                  SET_LOCK_HOLDER(); }
#        define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  GC_clear(&GC_allocate_lock); }
#     else
#        define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#        define UNLOCK() \
                GC_clear(&GC_allocate_lock)
#     endif /* !GC_ASSERTIONS */
#     if 0
        /* Another alternative for OSF1 might be:              */
#       include <sys/mman.h>
        extern msemaphore GC_allocate_semaphore;
#       define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
                            != 0) GC_lock(); else GC_allocate_lock = 1; }
        /* The following is INCORRECT, since the memory model is too weak. */
        /* Is this true?  Presumably msem_unlock has the right semantics?  */
        /*              - HB                                               */
#       define UNLOCK() { GC_allocate_lock = 0; \
                          msem_unlock(&GC_allocate_semaphore, 0); }
#     endif /* 0 */
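      /* GC_lock() itself is defined in the threads support code, not  */
      /* here.  As a rough illustrative sketch (the spin count and the */
      /* sched_yield() fallback are assumptions, not the library's     */
      /* actual tuning):                                               */
#     if 0
        void GC_lock(void)
        {
          unsigned i;

          for (;;) {
            /* Spin briefly in case the holder is about to exit. */
            for (i = 0; i < 1000; ++i) {
              if (!GC_test_and_set(&GC_allocate_lock)) return;
            }
            sched_yield();  /* then surrender the processor and retry */
          }
        }
#     endif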
#    else /* THREAD_LOCAL_ALLOC  || USE_PTHREAD_LOCKS */
#      ifndef USE_PTHREAD_LOCKS
#        define USE_PTHREAD_LOCKS
#      endif
#    endif /* THREAD_LOCAL_ALLOC */
#   ifdef USE_PTHREAD_LOCKS
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      ifdef GC_ASSERTIONS
#        define LOCK() \
                { GC_lock(); \
                  SET_LOCK_HOLDER(); }
#        define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  pthread_mutex_unlock(&GC_allocate_ml); }
#      else /* !GC_ASSERTIONS */
#        define LOCK() \
           { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#        define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#      endif /* !GC_ASSERTIONS */
#   endif /* USE_PTHREAD_LOCKS */
#   define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
    extern VOLATILE GC_bool GC_collecting;
#   define ENTER_GC() GC_collecting = 1;
#   define EXIT_GC() GC_collecting = 0;
    extern void GC_lock(void);
    extern pthread_t GC_lock_holder;
#   ifdef GC_ASSERTIONS
      extern pthread_t GC_mark_lock_holder;
#   endif
#  endif /* GC_PTHREADS with linux_threads.c implementation */
#  if defined(GC_IRIX_THREADS)
#    include <pthread.h>
     /* This probably should never be included, but I can't test       */
     /* on Irix anymore.                                               */
#    include <mutex.h>

     extern unsigned long GC_allocate_lock;
        /* This is not a mutex because mutexes that obey the (optional)     */
        /* POSIX scheduling rules are subject to convoys in high contention */
        /* applications.  This is basically a spin lock.                    */
     extern pthread_t GC_lock_holder;
     extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#    define NO_THREAD (pthread_t)(-1)
#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
#    define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#    define UNLOCK() GC_clear(&GC_allocate_lock);
     extern VOLATILE GC_bool GC_collecting;
#    define ENTER_GC() \
                { \
                    GC_collecting = 1; \
                }
#    define EXIT_GC() GC_collecting = 0;
#  endif /* GC_IRIX_THREADS */
#  if defined(GC_WIN32_THREADS)
#    if defined(GC_PTHREADS)
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      define LOCK()   pthread_mutex_lock(&GC_allocate_ml)
#      define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#    else
#      include <windows.h>
       GC_API CRITICAL_SECTION GC_allocate_ml;
#      define LOCK() EnterCriticalSection(&GC_allocate_ml);
#      define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
#    endif
#  endif
#  ifndef SET_LOCK_HOLDER
#      define SET_LOCK_HOLDER()
#      define UNSET_LOCK_HOLDER()
#      define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired,    */
                /* so it doesn't matter if we lie.                     */
#  endif
# else /* !THREADS */
#    define LOCK()
#    define UNLOCK()
# endif /* !THREADS */
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired,    */
                /* so it doesn't matter if we lie.                     */
# endif
# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif

# ifndef DCL_LOCK_STATE
#   define DCL_LOCK_STATE
# endif
# ifndef FASTLOCK
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif

#endif /* GC_LOCKS_H */
