
Annotation of OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h, Revision 1.4

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_LOCKS_H
#define GC_LOCKS_H

/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 *
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
 *
 */
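
/* An illustrative sketch of the fast-lock protocol described above.   */
/* GC_example_update is a hypothetical stand-in for a routine with a   */
/* short critical section; note that FASTUNLOCK() is called on both    */
/* paths, whether or not the fast acquisition succeeded.               */
#if 0
static void GC_example_update(void)
{
    DCL_LOCK_STATE;

    FASTLOCK();
    if (!FASTLOCK_SUCCEEDED()) {
        FASTUNLOCK();           /* Called whether or not we succeeded. */
        LOCK();                 /* Fall back to the full lock.         */
        /* ... slow path ... */
        UNLOCK();
    } else {
        /* ... a few instructions at most ... */
        FASTUNLOCK();
    }
}
#endif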
# ifdef THREADS
   void GC_noop1 GC_PROTO((word));
#  ifdef PCR_OBSOLETE   /* Faster, but broken with multiple lwp's       */
#    include  "th/PCR_Th.h"
#    include  "th/PCR_ThCrSec.h"
     extern struct PCR_Th_MLRep GC_allocate_ml;
#    define DCL_LOCK_STATE  PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() PCR_ThCrSec_EnterSys()
     /* Here we cheat (a lot): */
#        define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
                /* TRUE if nobody currently holds the lock */
#    define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#  endif
#  ifdef PCR
#    include <base/PCR_Base.h>
#    include <th/PCR_Th.h>
     extern PCR_Th_ML GC_allocate_ml;
#    define DCL_LOCK_STATE \
         PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#    define FASTUNLOCK()  {\
        if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#  endif
#  ifdef SRC_M3
     extern GC_word RT0u__inCritical;
#    define LOCK() RT0u__inCritical++
#    define UNLOCK() RT0u__inCritical--
#  endif
#  ifdef GC_SOLARIS_THREADS
#    include <thread.h>
#    include <signal.h>
     extern mutex_t GC_allocate_ml;
#    define LOCK() mutex_lock(&GC_allocate_ml);
#    define UNLOCK() mutex_unlock(&GC_allocate_ml);
#  endif

/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
/* acquisition and release.  We need this for correct operation of the */
/* incremental GC.                                                     */
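
/* For orientation: GC_test_and_set returns the old value of the lock  */
/* word (nonzero if the lock was already held), and GC_clear releases  */
/* it behind a barrier.  A sketch of the spin-lock idiom they support  */
/* (the real LOCK/UNLOCK definitions built on them appear further      */
/* below); GC_example_lock is a hypothetical name:                     */
#if 0
  static volatile unsigned int GC_example_lock = 0;

  static void GC_example_critical_section(void)
  {
    while (GC_test_and_set(&GC_example_lock)) {
      /* Spin (or yield) until the lock word reads as 0. */
    }
    /* ... critical section ... */
    GC_clear(&GC_example_lock);
  }
#endif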
#  ifdef __GNUC__
#    if defined(I386)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;
         /* Note: the "xchg" instruction does not need a "lock" prefix */
         __asm__ __volatile__("xchgl %0, %1"
                : "=r"(oldval), "=m"(*(addr))
                : "0"(1), "m"(*(addr)) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(IA64)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         long oldval, n = 1;
         __asm__ __volatile__("xchg4 %0=%1,%2"
                : "=r"(oldval), "=m"(*addr)
                : "r"(n), "1"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
       /* Should this handle post-increment addressing?? */
       inline static void GC_clear(volatile unsigned int *addr) {
         __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
       }
#      define GC_CLEAR_DEFINED
#    endif
#    ifdef SPARC
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;

         __asm__ __volatile__("ldstub %1,%0"
         : "=r"(oldval), "=m"(*addr)
         : "m"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    ifdef M68K
       /* Contributed by Tony Mantler.  I'm not sure how well it was   */
       /* tested.                                                      */
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          char oldval; /* this must be no longer than 8 bits */

          /* The return value is semi-phony. */
          /* 'tas' sets bit 7 while the return */
          /* value pretends bit 0 was set */
          __asm__ __volatile__(
                 "tas %1@; sne %0; negb %0"
                 : "=d" (oldval)
                 : "a" (addr) : "memory");
          return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#    endif
#    if defined(POWERPC)
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          int temp = 1; /* locked value */

          __asm__ __volatile__(
               "1:\tlwarx %0,0,%3\n"   /* load and reserve               */
               "\tcmpwi %0, 0\n"       /* if load is                     */
               "\tbne 2f\n"            /*   non-zero, return already set */
               "\tstwcx. %2,0,%1\n"    /* else store conditional         */
               "\tbne- 1b\n"           /* retry if lost reservation      */
               "\tsync\n"              /* import barrier                 */
               "2:\t\n"                /* oldval is zero if we set       */
              : "=&r"(oldval), "=p"(addr)
              : "r"(temp), "1"(addr)
              : "cr0","memory");
          return oldval;
        }
#       define GC_TEST_AND_SET_DEFINED
        inline static void GC_clear(volatile unsigned int *addr) {
          __asm__ __volatile__("eieio" : : : "memory");
          *(addr) = 0;
        }
#       define GC_CLEAR_DEFINED
#    endif
#    if defined(ALPHA)
        inline static int GC_test_and_set(volatile unsigned int * addr)
        {
          unsigned long oldvalue;
          unsigned long temp;

          __asm__ __volatile__(
                             "1:     ldl_l %0,%1\n"
                             "       and %0,%3,%2\n"
                             "       bne %2,2f\n"
                             "       xor %0,%3,%0\n"
                             "       stl_c %0,%1\n"
                             "       beq %0,3f\n"
                             "       mb\n"
                             "2:\n"
                             ".section .text2,\"ax\"\n"
                             "3:     br 1b\n"
                             ".previous"
                             :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                             :"Ir" (1), "m" (*addr)
                             :"memory");

          return oldvalue;
        }
#       define GC_TEST_AND_SET_DEFINED
        inline static void GC_clear(volatile unsigned int *addr) {
          __asm__ __volatile__("mb" : : : "memory");
          *(addr) = 0;
        }
#       define GC_CLEAR_DEFINED
#    endif /* ALPHA */
#    ifdef ARM32
        inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          /* SWP on ARM is very similar to XCHG on x86.  Doesn't lock the
           * bus because there are no SMP ARM machines.  If/when there are,
           * this code will likely need to be updated. */
          /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
          __asm__ __volatile__("swp %0, %1, [%2]"
                             : "=r"(oldval)
                             : "r"(1), "r"(addr)
                             : "memory");
          return oldval;
        }
#       define GC_TEST_AND_SET_DEFINED
#    endif /* ARM32 */
#    ifdef S390
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int ret;
         __asm__ __volatile__ (
          "     l     %0,0(%2)\n"
          "0:   cs    %0,%1,0(%2)\n"
          "     jl    0b"
          : "=&d" (ret)
          : "d" (1), "a" (addr)
          : "cc", "memory");
         return ret;
       }
#    endif
#  endif /* __GNUC__ */
#  if (defined(ALPHA) && !defined(__GNUC__))
#    ifndef OSF1
        --> We currently assume that if gcc is not used, we are
        --> running under Tru64.
#    endif
#    include <machine/builtins.h>
#    include <c_asm.h>
#    define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(MSWIN32)
#    define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#    define GC_TEST_AND_SET_DEFINED
#  endif
#  ifdef MIPS
#    ifdef LINUX
#      include <sys/tas.h>
#      define GC_test_and_set(addr) _test_and_set((int *) addr,1)
#      define GC_TEST_AND_SET_DEFINED
#    elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
        || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#       ifdef __GNUC__
#          define GC_test_and_set(addr) _test_and_set((void *)addr,1)
#       else
#          define GC_test_and_set(addr) test_and_set((void *)addr,1)
#       endif
#    else
#       define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
#       define GC_clear(addr) __lock_release(addr);
#       define GC_CLEAR_DEFINED
#    endif
#    define GC_TEST_AND_SET_DEFINED
#  endif /* MIPS */
#  if defined(_AIX)
#    include <sys/atomic_op.h>
#    if (defined(_POWER) || defined(_POWERPC))
#      if defined(__GNUC__)
         inline static void GC_memsync() {
           __asm__ __volatile__ ("sync" : : : "memory");
         }
#      else
#        ifndef inline
#          define inline __inline
#        endif
#        pragma mc_func GC_memsync { \
           "7c0004ac" /* sync (same opcode used for dcs)*/ \
         }
#      endif
#    else
#    error dont know how to memsync
#    endif
     inline static int GC_test_and_set(volatile unsigned int * addr) {
          int oldvalue = 0;
          if (compare_and_swap((void *)addr, &oldvalue, 1)) {
            GC_memsync();
            return 0;
          } else return 1;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
          GC_memsync();
          *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED

#  endif
#  if 0 /* defined(HP_PA) */
     /* The official recommendation seems to be to not use ldcw from   */
     /* user mode.  Since multithreaded incremental collection doesn't */
     /* work anyway on HP_PA, this shouldn't be a major loss.          */

     /* "set" means 0 and "clear" means 1 here.                */
#    define GC_test_and_set(addr) !GC_test_and_clear(addr);
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
        /* The above needs a memory barrier! */
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#    ifdef __GNUC__
       inline static void GC_clear(volatile unsigned int *addr) {
         /* Try to discourage gcc from moving anything past this. */
         __asm__ __volatile__(" " : : : "memory");
         *(addr) = 0;
       }
#    else
            /* The function call in the following should prevent the    */
            /* compiler from moving assignments to below the UNLOCK.    */
#      define GC_clear(addr) GC_noop1((word)(addr)); \
                             *((volatile unsigned int *)(addr)) = 0;
#    endif
#    define GC_CLEAR_DEFINED
#  endif /* !GC_CLEAR_DEFINED */

#  if !defined(GC_TEST_AND_SET_DEFINED)
#    define USE_PTHREAD_LOCKS
#  endif

#  if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
      && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
#    define NO_THREAD (pthread_t)(-1)
#    include <pthread.h>
#    if defined(PARALLEL_MARK)
      /* We need compare-and-swap to update mark bits, where it's      */
      /* performance critical.  If USE_MARK_BYTES is defined, it is    */
      /* no longer needed for this purpose.  However we use it in      */
      /* either case to implement atomic fetch-and-add, though that's  */
      /* less performance critical, and could perhaps be done with     */
      /* a lock.                                                       */
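      /* An illustrative sketch (not part of the interface) of setting */
      /* a mark bit atomically with the primitive declared below;      */
      /* GC_example_set_mark_bit and mark_bit are hypothetical names:  */
#     if 0
        static void GC_example_set_mark_bit(volatile GC_word *mark_word,
                                            GC_word mark_bit)
        {
          GC_word old;
          do {
            old = *mark_word;
          } while (!GC_compare_and_exchange(mark_word, old, old | mark_bit));
        }
#     endif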
#     if defined(GENERIC_COMPARE_AND_SWAP)
        /* Probably not useful, except for debugging.   */
        /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we       */
        /* minimize its use.                                           */
        extern pthread_mutex_t GC_compare_and_swap_lock;

        /* Note that if GC_word updates are not atomic, a concurrent   */
        /* reader should acquire GC_compare_and_swap_lock.  On         */
        /* currently supported platforms, such updates are atomic.     */
        extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                               GC_word old, GC_word new_val);
#     endif /* GENERIC_COMPARE_AND_SWAP */
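        /* A minimal sketch of what the out-of-line generic version    */
        /* might look like; the real definition lives in the library   */
        /* sources, not in this header:                                */
#     if 0
        GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                        GC_word old, GC_word new_val)
        {
          GC_bool result;

          pthread_mutex_lock(&GC_compare_and_swap_lock);
          if (*addr == old) {
            *addr = new_val;
            result = TRUE;
          } else {
            result = FALSE;
          }
          pthread_mutex_unlock(&GC_compare_and_swap_lock);
          return result;
        }
#     endif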
#     if defined(I386)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         /* Returns TRUE if the comparison succeeded. */
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           char result;
           __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                : "+m"(*(addr)), "=r"(result)
                : "r" (new_val), "a"(old) : "memory");
           return (GC_bool) result;
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_barrier()
       {
         /* We believe the processor ensures at least processor */
         /* consistent ordering.  Thus a compiler barrier       */
         /* should suffice.                                     */
         __asm__ __volatile__("" : : : "memory");
       }
#     endif /* I386 */

#     if defined(POWERPC)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
        /* Returns TRUE if the comparison succeeded. */
        inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
            GC_word old, GC_word new_val)
        {
            int result, dummy;
            __asm__ __volatile__(
                "1:\tlwarx %0,0,%5\n"
                  "\tcmpw %0,%4\n"
                  "\tbne  2f\n"
                  "\tstwcx. %3,0,%2\n"
                  "\tbne- 1b\n"
                  "\tsync\n"
                  "\tli %1, 1\n"
                  "\tb 3f\n"
                "2:\tli %1, 0\n"
                "3:\t\n"
                :  "=&r" (dummy), "=r" (result), "=p" (addr)
                :  "r" (new_val), "r" (old), "2"(addr)
                : "cr0","memory");
            return (GC_bool) result;
        }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
        inline static void GC_memory_barrier()
        {
            __asm__ __volatile__("sync" : : : "memory");
        }
#     endif /* POWERPC */

#     if defined(IA64)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old, GC_word new_val)
         {
          unsigned long oldval;
          __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
                : "=r"(oldval), "=m"(*addr)
                : "r"(new_val), "1"(*addr), "r"(old) : "memory");
          return (oldval == old);
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      if 0
        /* Shouldn't be needed; we use volatile stores instead. */
        inline static void GC_memory_barrier()
        {
          __asm__ __volatile__("mf" : : : "memory");
        }
#      endif /* 0 */
#     endif /* IA64 */
#     if defined(ALPHA)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
#        if defined(__GNUC__)
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old, GC_word new_val)
           {
             unsigned long was_equal;
             unsigned long temp;

             __asm__ __volatile__(
                             "1:     ldq_l %0,%1\n"
                             "       cmpeq %0,%4,%2\n"
                             "       mov %3,%0\n"
                             "       beq %2,2f\n"
                             "       stq_c %0,%1\n"
                             "       beq %0,1b\n"
                             "2:\n"
                             "       mb\n"
                             :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
                             : "r" (new_val), "Ir" (old)
                             :"memory");
             return was_equal;
           }
#        else /* !__GNUC__ */
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old, GC_word new_val)
           {
             return __CMP_STORE_QUAD(addr, old, new_val, addr);
           }
#        endif /* !__GNUC__ */
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      ifdef __GNUC__
         inline static void GC_memory_barrier()
         {
           __asm__ __volatile__("mb" : : : "memory");
         }
#      else
#        define GC_memory_barrier() asm("mb")
#      endif /* !__GNUC__ */
#     endif /* ALPHA */
#     if defined(S390)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                         GC_word old, GC_word new_val)
         {
           int retval;
           __asm__ __volatile__ (
#            ifndef __s390x__
               "     cs  %1,%2,0(%3)\n"
#            else
               "     csg %1,%2,0(%3)\n"
#            endif
             "     ipm %0\n"
             "     srl %0,28\n"
             : "=&d" (retval), "+d" (old)
             : "d" (new_val), "a" (addr)
             : "cc", "memory");
           return retval == 0;
         }
#      endif
#     endif
#     if !defined(GENERIC_COMPARE_AND_SWAP)
        /* Returns the original value of *addr.        */
        inline static GC_word GC_atomic_add(volatile GC_word *addr,
                                            GC_word how_much)
        {
          GC_word old;
          do {
            old = *addr;
          } while (!GC_compare_and_exchange(addr, old, old+how_much));
          return old;
        }
#     else /* GENERIC_COMPARE_AND_SWAP */
        /* So long as a GC_word can be atomically updated, it should   */
        /* be OK to read *addr without a lock.                         */
        extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
#     endif /* GENERIC_COMPARE_AND_SWAP */
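
        /* For illustration: a fetch-and-add use as it might appear in */
        /* a parallel marker; GC_example_counter and                   */
        /* GC_example_next_index are hypothetical names:               */
#     if 0
        static volatile GC_word GC_example_counter = 0;

        static GC_word GC_example_next_index(void)
        {
          /* Returns the value of the counter before the increment. */
          return GC_atomic_add(&GC_example_counter, 1);
        }
#     endif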

#    endif /* PARALLEL_MARK */

#    if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
      /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to  */
      /* be held for long periods, if it is held at all.  Thus spinning */
      /* and sleeping for fixed periods are likely to result in        */
      /* significant wasted time.  We thus rely mostly on queued locks. */
#     define USE_SPIN_LOCK
      extern volatile unsigned int GC_allocate_lock;
      extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#     ifdef GC_ASSERTIONS
#        define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
                  SET_LOCK_HOLDER(); }
#        define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  GC_clear(&GC_allocate_lock); }
#     else
#        define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#        define UNLOCK() \
                GC_clear(&GC_allocate_lock)
#     endif /* !GC_ASSERTIONS */
#     if 0
        /* Another alternative for OSF1 might be:               */
#       include <sys/mman.h>
        extern msemaphore GC_allocate_semaphore;
#       define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
                            != 0) GC_lock(); else GC_allocate_lock = 1; }
        /* The following is INCORRECT, since the memory model is too weak. */
        /* Is this true?  Presumably msem_unlock has the right semantics?  */
        /*              - HB                                               */
#       define UNLOCK() { GC_allocate_lock = 0; \
                          msem_unlock(&GC_allocate_semaphore, 0); }
#     endif /* 0 */
#    else /* THREAD_LOCAL_ALLOC  || USE_PTHREAD_LOCKS */
#      ifndef USE_PTHREAD_LOCKS
#        define USE_PTHREAD_LOCKS
#      endif
#    endif /* THREAD_LOCAL_ALLOC */
#   ifdef USE_PTHREAD_LOCKS
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      ifdef GC_ASSERTIONS
#        define LOCK() \
                { GC_lock(); \
                  SET_LOCK_HOLDER(); }
#        define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  pthread_mutex_unlock(&GC_allocate_ml); }
#      else /* !GC_ASSERTIONS */
#        if defined(NO_PTHREAD_TRYLOCK)
#          define LOCK() GC_lock();
#        else /* !defined(NO_PTHREAD_TRYLOCK) */
#          define LOCK() \
             { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#        endif
#        define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#      endif /* !GC_ASSERTIONS */
#   endif /* USE_PTHREAD_LOCKS */
#   define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
    extern VOLATILE GC_bool GC_collecting;
#   define ENTER_GC() GC_collecting = 1;
#   define EXIT_GC() GC_collecting = 0;
    extern void GC_lock(void);
    extern pthread_t GC_lock_holder;
#   ifdef GC_ASSERTIONS
      extern pthread_t GC_mark_lock_holder;
#   endif
#  endif /* GC_PTHREADS with linux_threads.c implementation */
#  if defined(GC_IRIX_THREADS)
#    include <pthread.h>
     /* This probably should never be included, but I can't test       */
     /* on Irix anymore.                                               */
#    include <mutex.h>

     extern volatile unsigned int GC_allocate_lock;
        /* This is not a mutex because mutexes that obey the (optional)        */
        /* POSIX scheduling rules are subject to convoys in high contention    */
        /* applications.  This is basically a spin lock.                       */
     extern pthread_t GC_lock_holder;
     extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#    define NO_THREAD (pthread_t)(-1)
#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
#    define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#    define UNLOCK() GC_clear(&GC_allocate_lock);
     extern VOLATILE GC_bool GC_collecting;
#    define ENTER_GC() \
                { \
                    GC_collecting = 1; \
                }
#    define EXIT_GC() GC_collecting = 0;
#  endif /* GC_IRIX_THREADS */
#  if defined(GC_WIN32_THREADS)
#    if defined(GC_PTHREADS)
#      include <pthread.h>
       extern pthread_mutex_t GC_allocate_ml;
#      define LOCK()   pthread_mutex_lock(&GC_allocate_ml)
#      define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#    else
#      include <windows.h>
       GC_API CRITICAL_SECTION GC_allocate_ml;
#      define LOCK() EnterCriticalSection(&GC_allocate_ml);
#      define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
#    endif
#  endif
#  ifndef SET_LOCK_HOLDER
#      define SET_LOCK_HOLDER()
#      define UNSET_LOCK_HOLDER()
#      define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired,     */
                /* so it doesn't matter if we lie.                      */
#  endif
# else /* !THREADS */
#    define LOCK()
#    define UNLOCK()
# endif /* !THREADS */
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired,     */
                /* so it doesn't matter if we lie.                      */
# endif
# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif

# ifndef DCL_LOCK_STATE
#   define DCL_LOCK_STATE
# endif
# ifndef FASTLOCK
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif
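
/* Finally, an illustrative sketch of how an allocator entry point     */
/* uses these macros; GC_example_entry_point is a hypothetical name,   */
/* and DISABLE_SIGNALS/ENABLE_SIGNALS are defined elsewhere in the     */
/* private headers:                                                    */
#if 0
static void GC_example_entry_point(void)
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    /* ... manipulate allocator/collector state ... */
    UNLOCK();
    ENABLE_SIGNALS();
}
#endif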

#endif /* GC_LOCKS_H */
