Annotation of OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h, Revision 1.1
1.1 ! noro 1: /*
! 2: * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
! 3: * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
! 4: * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
! 5: * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
! 6: *
! 7: *
! 8: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
! 9: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
! 10: *
! 11: * Permission is hereby granted to use or copy this program
! 12: * for any purpose, provided the above notices are retained on all copies.
! 13: * Permission to modify the code and to distribute modified code is granted,
! 14: * provided the above notices are retained, and a notice that the code was
! 15: * modified is included with the above copyright notice.
! 16: */
! 17:
! 18: #ifndef GC_LOCKS_H
! 19: #define GC_LOCKS_H
! 20:
! 21: /*
! 22: * Mutual exclusion between allocator/collector routines.
! 23: * Needed if there is more than one allocator thread.
! 24: * FASTLOCK() is assumed to try to acquire the lock in a cheap and
! 25: * dirty way that is acceptable for a few instructions, e.g. by
! 26: * inhibiting preemption. This is assumed to have succeeded only
! 27: * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
! 28: * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
! 29: * If signals cannot be tolerated with the FASTLOCK held, then
! 30: * FASTLOCK should disable signals. The code executed under
! 31: * FASTLOCK is otherwise immune to interruption, provided it is
! 32: * not restarted.
! 33: * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
! 34: * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
! 35: * (There is currently no equivalent for FASTLOCK.)
! 36: *
! 37: * In the PARALLEL_MARK case, we also need to define a number of
!            38:  *	other inline functions here:
! 39: * GC_bool GC_compare_and_exchange( volatile GC_word *addr,
! 40: * GC_word old, GC_word new )
! 41: * GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
! 42: * void GC_memory_barrier( )
! 43: *
! 44: */
! 45: # ifdef THREADS
! 46: # ifdef PCR_OBSOLETE /* Faster, but broken with multiple lwp's */
! 47: # include "th/PCR_Th.h"
! 48: # include "th/PCR_ThCrSec.h"
! 49: extern struct PCR_Th_MLRep GC_allocate_ml;
! 50: # define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
! 51: # define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
! 52: # define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
! 53: # define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
! 54: # define FASTLOCK() PCR_ThCrSec_EnterSys()
! 55: /* Here we cheat (a lot): */
! 56: # define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
! 57: /* TRUE if nobody currently holds the lock */
! 58: # define FASTUNLOCK() PCR_ThCrSec_ExitSys()
! 59: # endif
! 60: # ifdef PCR
! 61: # include <base/PCR_Base.h>
! 62: # include <th/PCR_Th.h>
! 63: extern PCR_Th_ML GC_allocate_ml;
! 64: # define DCL_LOCK_STATE \
! 65: PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
! 66: # define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
! 67: # define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
! 68: # define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
! 69: # define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
! 70: # define FASTUNLOCK() {\
! 71: if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
! 72: # endif
! 73: # ifdef SRC_M3
! 74: extern GC_word RT0u__inCritical;
! 75: # define LOCK() RT0u__inCritical++
! 76: # define UNLOCK() RT0u__inCritical--
! 77: # endif
! 78: # ifdef SOLARIS_THREADS
! 79: # include <thread.h>
! 80: # include <signal.h>
! 81: extern mutex_t GC_allocate_ml;
! 82: # define LOCK() mutex_lock(&GC_allocate_ml);
! 83: # define UNLOCK() mutex_unlock(&GC_allocate_ml);
! 84: # endif
! 85:
! 86: /* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
! 87: /* acquisition and release. We need this for correct operation of the */
! 88: /* incremental GC. */
! 89: # ifdef __GNUC__
! 90: # if defined(I386)
! 91: inline static int GC_test_and_set(volatile unsigned int *addr) {
! 92: int oldval;
! 93: /* Note: the "xchg" instruction does not need a "lock" prefix */
! 94: __asm__ __volatile__("xchgl %0, %1"
! 95: : "=r"(oldval), "=m"(*(addr))
! 96: : "0"(1), "m"(*(addr)) : "memory");
! 97: return oldval;
! 98: }
! 99: # define GC_TEST_AND_SET_DEFINED
! 100: # endif
#   if defined(IA64)
      /* xchg4: 4-byte atomic exchange.  Nonzero result => lock held. */
      inline static int GC_test_and_set(volatile unsigned int *addr) {
	long prev, locked = 1;
	__asm__ __volatile__("xchg4 %0=%1,%2"
		: "=r"(prev), "=m"(*addr)
		: "r"(locked), "1"(*addr) : "memory");
	return prev;
      }
#     define GC_TEST_AND_SET_DEFINED
      /* Should this handle post-increment addressing?? */
      /* st4.rel is a store with release semantics, so no separate	*/
      /* barrier is needed on the unlock path.				*/
      inline static void GC_clear(volatile unsigned int *addr) {
	__asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
      }
#     define GC_CLEAR_DEFINED
#   endif
#   ifdef SPARC
      /* ldstub atomically loads the byte and stores all-ones into it. */
      inline static int GC_test_and_set(volatile unsigned int *addr) {
	int prev;

	__asm__ __volatile__("ldstub %1,%0"
		: "=r"(prev), "=m"(*addr)
		: "m"(*addr) : "memory");
	return prev;
      }
#     define GC_TEST_AND_SET_DEFINED
#   endif
#   ifdef M68K
      /* Contributed by Tony Mantler.  I'm not sure how well it was	*/
      /* tested.							*/
      inline static int GC_test_and_set(volatile unsigned int *addr) {
	char prev;	/* this must be no longer than 8 bits */

	/* The return value is semi-phony: 'tas' sets bit 7, while the	*/
	/* returned value pretends bit 0 was set instead.		*/
	__asm__ __volatile__(
		"tas %1@; sne %0; negb %0"
		: "=d" (prev)
		: "a" (addr) : "memory");
	return prev;
      }
#     define GC_TEST_AND_SET_DEFINED
#   endif
#   if defined(POWERPC)
      /* Classic lwarx/stwcx. load-reserved / store-conditional loop. */
      inline static int GC_test_and_set(volatile unsigned int *addr) {
	int prev;
	int locked = 1;			/* value stored to take the lock */

	__asm__ __volatile__(
	       "1:\tlwarx %0,0,%3\n"	/* load and reserve		 */
	       "\tcmpwi %0, 0\n"	/* if the loaded value is	 */
	       "\tbne 2f\n"		/* nonzero, it is already set	 */
	       "\tstwcx. %2,0,%1\n"	/* else store conditionally	 */
	       "\tbne- 1b\n"		/* retry if reservation was lost */
	       "2:\t\n"			/* prev is zero iff we set it	 */
	       : "=&r"(prev), "=p"(addr)
	       : "r"(locked), "1"(addr)
	       : "memory");
	return (int)prev;
      }
#     define GC_TEST_AND_SET_DEFINED
      /* eieio orders the clearing store after all earlier accesses. */
      inline static void GC_clear(volatile unsigned int *addr) {
	__asm__ __volatile__("eieio" ::: "memory");
	*(addr) = 0;
      }
#     define GC_CLEAR_DEFINED
#   endif
#   if defined(ALPHA)
      /* ldl_l/stl_c load-locked / store-conditional sequence.  The	*/
      /* retry branch target lives in section .text2 to keep the	*/
      /* common path straight-line.					*/
      inline static int GC_test_and_set(volatile unsigned int * addr)
      {
	unsigned long prev;
	unsigned long scratch;

	__asm__ __volatile__(
		"1: ldl_l %0,%1\n"
		" and %0,%3,%2\n"
		" bne %2,2f\n"
		" xor %0,%3,%0\n"
		" stl_c %0,%1\n"
		" beq %0,3f\n"
		" mb\n"
		"2:\n"
		".section .text2,\"ax\"\n"
		"3: br 1b\n"
		".previous"
		:"=&r" (scratch), "=m" (*addr), "=&r" (prev)
		:"Ir" (1), "m" (*addr)
		:"memory");

	return prev;
      }
#     define GC_TEST_AND_SET_DEFINED
      /* Should probably also define GC_clear, since it needs	*/
      /* a memory barrier ??					*/
#   endif /* ALPHA */
! 196: # ifdef ARM32
! 197: inline static int GC_test_and_set(volatile unsigned int *addr) {
! 198: int oldval;
! 199: /* SWP on ARM is very similar to XCHG on x86. Doesn't lock the
! 200: * bus because there are no SMP ARM machines. If/when there are,
! 201: * this code will likely need to be updated. */
! 202: /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
! 203: __asm__ __volatile__("swp %0, %1, [%2]"
! 204: : "=r"(oldval)
! 205: : "r"(1), "r"(addr)
! 206: : "memory");
! 207: return oldval;
! 208: }
! 209: # define GC_TEST_AND_SET_DEFINED
! 210: # endif /* ARM32 */
! 211: # endif /* __GNUC__ */
# if (defined(ALPHA) && !defined(__GNUC__))
    /* Compaq/DEC compiler: rely on its atomic builtin. */
#   define GC_test_and_set(addr) __cxx_test_and_set_atomic(addr, 1)
#   define GC_TEST_AND_SET_DEFINED
# endif
# if defined(MSWIN32)
    /* Win32 provides an interlocked exchange primitive. */
#   define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#   define GC_TEST_AND_SET_DEFINED
# endif
# ifdef MIPS
    /* Older ABIs/compilers expose test_and_set(); newer MIPSpro	*/
    /* (>= 7.0, N32/N64 ABI, mips3 or later) provides the		*/
    /* __test_and_set / __lock_release intrinsics instead.		*/
#   if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
	|| !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#	define GC_test_and_set(addr, v) test_and_set(addr,v)
#   else
#	define GC_test_and_set(addr, v) __test_and_set(addr,v)
#	define GC_clear(addr) __lock_release(addr);
#	define GC_CLEAR_DEFINED
#   endif
#   define GC_TEST_AND_SET_DEFINED
# endif /* MIPS */
# if 0 /* defined(HP_PA) */
    /* The official recommendation seems to be to not use ldcw from	*/
    /* user mode.  Since multithreaded incremental collection doesn't	*/
    /* work anyway on HP_PA, this shouldn't be a major loss.		*/

    /* "set" means 0 and "clear" means 1 here.				*/
#   define GC_test_and_set(addr) !GC_test_and_clear(addr);
#   define GC_TEST_AND_SET_DEFINED
#   define GC_clear(addr) GC_noop1(addr); *(volatile unsigned int *)addr = 1;
	/* The above needs a memory barrier! */
#   define GC_CLEAR_DEFINED
# endif
# if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#   ifdef __GNUC__
      /* Default unlock: compiler barrier followed by a plain store. */
      inline static void GC_clear(volatile unsigned int *addr) {
	/* Try to discourage gcc from moving anything past this. */
	__asm__ __volatile__(" " : : : "memory");
	*(addr) = 0;
      }
#   else
      /* The function call in the following should prevent the	*/
      /* compiler from moving assignments to below the UNLOCK.	*/
#     define GC_clear(addr) GC_noop1((word)(addr)); \
		*((volatile unsigned int *)(addr)) = 0;
#   endif
#   define GC_CLEAR_DEFINED
# endif /* !GC_CLEAR_DEFINED */

# if !defined(GC_TEST_AND_SET_DEFINED)
    /* No hardware test-and-set: fall back to pthread mutexes. */
#   define USE_PTHREAD_LOCKS
# endif
! 262:
! 263: # if defined(LINUX_THREADS) || defined(OSF1_THREADS) \
! 264: || defined(HPUX_THREADS)
! 265: # define NO_THREAD (pthread_t)(-1)
! 266: # include <pthread.h>
! 267: # if defined(PARALLEL_MARK)
! 268: /* We need compare-and-swap to update mark bits, where it's */
! 269: /* performance critical. If USE_MARK_BYTES is defined, it is */
! 270: /* no longer needed for this purpose. However we use it in */
! 271: /* either case to implement atomic fetch-and-add, though that's */
! 272: /* less performance critical, and could perhaps be done with */
! 273: /* a lock. */
! 274: # if defined(GENERIC_COMPARE_AND_SWAP)
! 275: /* Probably not useful, except for debugging. */
! 276: /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we */
! 277: /* minimize its use. */
! 278: extern pthread_mutex_t GC_compare_and_swap_lock;
! 279:
! 280: /* Note that if GC_word updates are not atomic, a concurrent */
! 281: /* reader should acquire GC_compare_and_swap_lock. On */
! 282: /* currently supported platforms, such updates are atomic. */
! 283: extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
! 284: GC_word old, GC_word new_val);
! 285: # endif /* GENERIC_COMPARE_AND_SWAP */
#     if defined(I386)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
	 /* Returns TRUE if the comparison succeeded. */
	 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
						       GC_word old,
						       GC_word new_val)
	 {
	   char result;
	   /* cmpxchgl compares EAX (old) with *addr; setz captures ZF. */
	   __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
		: "=m"(*(addr)), "=r"(result)
		: "r" (new_val), "0"(*(addr)), "a"(old) : "memory");
	   return (GC_bool) result;
	 }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_write_barrier()
       {
	 /* We believe the processor ensures at least processor		*/
	 /* consistent ordering.  Thus a compiler barrier		*/
	 /* should suffice.						*/
	 __asm__ __volatile__("" : : : "memory");
       }
#     endif /* I386 */
#     if defined(IA64)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
	 /* cmpxchg8.rel compares against ar.ccv (loaded with old)	*/
	 /* and stores new_val on a match, with release semantics.	*/
	 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
						       GC_word old, GC_word new_val)
	 {
	   unsigned long prev;
	   __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
		: "=r"(prev), "=m"(*addr)
		: "r"(new_val), "1"(*addr), "r"(old) : "memory");
	   return (prev == old);
	 }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      if 0
	 /* Shouldn't be needed; we use volatile stores instead. */
	 inline static void GC_memory_write_barrier()
	 {
	   __asm__ __volatile__("mf" : : : "memory");
	 }
#      endif /* 0 */
#     endif /* IA64 */
! 328: # if !defined(GENERIC_COMPARE_AND_SWAP)
! 329: /* Returns the original value of *addr. */
! 330: inline static GC_word GC_atomic_add(volatile GC_word *addr,
! 331: GC_word how_much)
! 332: {
! 333: GC_word old;
! 334: do {
! 335: old = *addr;
! 336: } while (!GC_compare_and_exchange(addr, old, old+how_much));
! 337: return old;
! 338: }
! 339: # else /* GENERIC_COMPARE_AND_SWAP */
! 340: /* So long as a GC_word can be atomically updated, it should */
! 341: /* be OK to read *addr without a lock. */
! 342: extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
! 343: # endif /* GENERIC_COMPARE_AND_SWAP */
! 344:
! 345: # endif /* PARALLEL_MARK */
! 346:
! 347: # if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
! 348: /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
! 349: /* be held for long periods, if it is held at all. Thus spinning */
! 350: /* and sleeping for fixed periods are likely to result in */
! 351: /* significant wasted time. We thus rely mostly on queued locks. */
! 352: # define USE_SPIN_LOCK
! 353: extern volatile unsigned int GC_allocate_lock;
! 354: extern void GC_lock(void);
! 355: /* Allocation lock holder. Only set if acquired by client through */
! 356: /* GC_call_with_alloc_lock. */
! 357: # ifdef GC_ASSERTIONS
! 358: # define LOCK() \
! 359: { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
! 360: SET_LOCK_HOLDER(); }
! 361: # define UNLOCK() \
! 362: { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
! 363: GC_clear(&GC_allocate_lock); }
! 364: # else
! 365: # define LOCK() \
! 366: { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
! 367: # define UNLOCK() \
! 368: GC_clear(&GC_allocate_lock)
! 369: # endif /* !GC_ASSERTIONS */
! 370: # if 0
! 371: /* Another alternative for OSF1 might be: */
! 372: # include <sys/mman.h>
! 373: extern msemaphore GC_allocate_semaphore;
! 374: # define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
! 375: != 0) GC_lock(); else GC_allocate_lock = 1; }
! 376: /* The following is INCORRECT, since the memory model is too weak. */
! 377: /* Is this true? Presumably msem_unlock has the right semantics? */
! 378: /* - HB */
! 379: # define UNLOCK() { GC_allocate_lock = 0; \
! 380: msem_unlock(&GC_allocate_semaphore, 0); }
! 381: # endif /* 0 */
! 382: # else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
! 383: # ifndef USE_PTHREAD_LOCKS
! 384: # define USE_PTHREAD_LOCKS
! 385: # endif
! 386: # endif /* THREAD_LOCAL_ALLOC */
! 387: # ifdef USE_PTHREAD_LOCKS
! 388: # include <pthread.h>
! 389: extern pthread_mutex_t GC_allocate_ml;
! 390: # ifdef GC_ASSERTIONS
! 391: # define LOCK() \
! 392: { GC_lock(); \
! 393: SET_LOCK_HOLDER(); }
! 394: # define UNLOCK() \
! 395: { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
! 396: pthread_mutex_unlock(&GC_allocate_ml); }
! 397: # else /* !GC_ASSERTIONS */
! 398: # define LOCK() \
! 399: { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
! 400: # define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
! 401: # endif /* !GC_ASSERTIONS */
! 402: # endif /* USE_PTHREAD_LOCKS */
! 403: # define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
! 404: # define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
! 405: # define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
! 406: extern VOLATILE GC_bool GC_collecting;
! 407: # define ENTER_GC() GC_collecting = 1;
! 408: # define EXIT_GC() GC_collecting = 0;
! 409: extern void GC_lock(void);
! 410: extern pthread_t GC_lock_holder;
! 411: # ifdef GC_ASSERTIONS
! 412: extern pthread_t GC_mark_lock_holder;
! 413: # endif
! 414: # endif /* LINUX_THREADS || OSF1_THREADS || HPUX_THREADS */
# if defined(IRIX_THREADS)
#   include <pthread.h>
    /* This probably should never be included, but I can't test	*/
    /* on Irix anymore.						*/
#   include <mutex.h>

    extern unsigned long GC_allocate_lock;
	/* This is not a mutex because mutexes that obey the (optional)	*/
	/* POSIX scheduling rules are subject to convoys in high	*/
	/* contention applications.  This is basically a spin lock.	*/
    extern pthread_t GC_lock_holder;
    extern void GC_lock(void);
	/* Allocation lock holder.  Only set if acquired by client	*/
	/* through GC_call_with_alloc_lock.				*/
#   define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#   define NO_THREAD (pthread_t)(-1)
#   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
#   define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
#   define UNLOCK() GC_clear(&GC_allocate_lock);
    extern VOLATILE GC_bool GC_collecting;
#   define ENTER_GC() \
		{ \
		    GC_collecting = 1; \
		}
#   define EXIT_GC() GC_collecting = 0;
# endif /* IRIX_THREADS */
# ifdef WIN32_THREADS
#   include <windows.h>
    GC_API CRITICAL_SECTION GC_allocate_ml;
#   define LOCK() EnterCriticalSection(&GC_allocate_ml);
#   define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
# endif
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
		/* Used on platforms where locks can be reacquired,	*/
		/* so it doesn't matter if we lie.			*/
# endif
! 455: # else /* !THREADS */
! 456: # define LOCK()
! 457: # define UNLOCK()
! 458: # endif /* !THREADS */
! 459: # ifndef SET_LOCK_HOLDER
! 460: # define SET_LOCK_HOLDER()
! 461: # define UNSET_LOCK_HOLDER()
! 462: # define I_HOLD_LOCK() FALSE
! 463: /* Used on platforms were locks can be reacquired, */
! 464: /* so it doesn't matter if we lie. */
! 465: # endif
! 466: # ifndef ENTER_GC
! 467: # define ENTER_GC()
! 468: # define EXIT_GC()
! 469: # endif
! 470:
! 471: # ifndef DCL_LOCK_STATE
! 472: # define DCL_LOCK_STATE
! 473: # endif
! 474: # ifndef FASTLOCK
! 475: # define FASTLOCK() LOCK()
! 476: # define FASTLOCK_SUCCEEDED() TRUE
! 477: # define FASTUNLOCK() UNLOCK()
! 478: # endif
! 479:
! 480: #endif /* GC_LOCKS_H */
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>