===================================================================
RCS file: /home/cvs/OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h,v
retrieving revision 1.1
retrieving revision 1.4
diff -u -p -r1.1 -r1.4
--- OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h	2001/04/20 07:56:56	1.1
+++ OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h	2003/06/24 05:11:41	1.4
@@ -43,6 +43,7 @@
  *
  */
 # ifdef THREADS
+   void GC_noop1 GC_PROTO((word));
 # ifdef PCR_OBSOLETE	/* Faster, but broken with multiple lwp's */
 #   include "th/PCR_Th.h"
 #   include "th/PCR_ThCrSec.h"
@@ -75,7 +76,7 @@
 #    define LOCK() RT0u__inCritical++
 #    define UNLOCK() RT0u__inCritical--
 #  endif
-#  ifdef SOLARIS_THREADS
+#  ifdef GC_SOLARIS_THREADS
 #    include <thread.h>
 #    include <synch.h>
     extern mutex_t GC_allocate_ml;
@@ -144,23 +145,24 @@
 #    if defined(POWERPC)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
        int oldval;
-       int temp = 1; // locked value
+       int temp = 1; /* locked value */
        __asm__ __volatile__(
-              "1:\tlwarx %0,0,%3\n"	// load and reserve
-              "\tcmpwi %0, 0\n"	// if load is
-              "\tbne 2f\n"		// non-zero, return already set
-              "\tstwcx. %2,0,%1\n"	// else store conditional
-              "\tbne- 1b\n"		// retry if lost reservation
-              "2:\t\n"		// oldval is zero if we set
+              "1:\tlwarx %0,0,%3\n"	/* load and reserve		*/
+              "\tcmpwi %0, 0\n"	/* if load is			*/
+              "\tbne 2f\n"		/* non-zero, return already set	*/
+              "\tstwcx. %2,0,%1\n"	/* else store conditional	*/
+              "\tbne- 1b\n"		/* retry if lost reservation	*/
+              "\tsync\n"		/* import barrier		*/
+              "2:\t\n"		/* oldval is zero if we set	*/
              : "=&r"(oldval), "=p"(addr)
              : "r"(temp), "1"(addr)
-             : "memory");
-       return (int)oldval;
+             : "cr0","memory");
+       return oldval;
       }
 #     define GC_TEST_AND_SET_DEFINED
       inline static void GC_clear(volatile unsigned int *addr) {
-       __asm__ __volatile__("eieio" ::: "memory");
+       __asm__ __volatile__("eieio" : : : "memory");
        *(addr) = 0;
       }
 #     define GC_CLEAR_DEFINED
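The primitives above share a single contract: GC_test_and_set must return the previous value of the flag and act as an acquire barrier when it succeeds (which is what the added "sync" buys on PowerPC), and GC_clear must act as a release (hence the "eieio" before the store). A minimal sketch of the same protocol in portable C11 atomics, for illustration only; stdatomic.h and all names below are stand-ins, not part of this header:

    #include <stdatomic.h>

    static atomic_flag allocate_lock = ATOMIC_FLAG_INIT;

    static void lock(void)
    {
        /* Spin until the flag was previously clear.  Acquire ordering */
        /* keeps the critical section from floating above the lock,    */
        /* mirroring the "sync" in the PowerPC asm above.              */
        while (atomic_flag_test_and_set_explicit(&allocate_lock,
                                                 memory_order_acquire))
            ;   /* spin; a real implementation would back off or yield */
    }

    static void unlock(void)
    {
        /* Release ordering publishes all writes made while holding    */
        /* the lock before the flag is seen as clear, mirroring the    */
        /* "eieio" in GC_clear above.                                   */
        atomic_flag_clear_explicit(&allocate_lock, memory_order_release);
    }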
@@ -190,8 +192,11 @@
 	  return oldvalue;
 	}
 #     define GC_TEST_AND_SET_DEFINED
-      /* Should probably also define GC_clear, since it needs	*/
-      /* a memory barrier ??					*/
+      inline static void GC_clear(volatile unsigned int *addr) {
+        __asm__ __volatile__("mb" : : : "memory");
+        *(addr) = 0;
+      }
+#     define GC_CLEAR_DEFINED
 #    endif /* ALPHA */
 #    ifdef ARM32
       inline static int GC_test_and_set(volatile unsigned int *addr) {
@@ -208,26 +213,88 @@
       }
 #     define GC_TEST_AND_SET_DEFINED
 #    endif /* ARM32 */
+#    ifdef S390
+      inline static int GC_test_and_set(volatile unsigned int *addr) {
+        int ret;
+        __asm__ __volatile__ (
+          "   l     %0,0(%2)\n"
+          "0: cs    %0,%1,0(%2)\n"
+          "   jl    0b"
+          : "=&d" (ret)
+          : "d" (1), "a" (addr)
+          : "cc", "memory");
+        return ret;
+      }
+#    endif
 #  endif /* __GNUC__ */
 #  if (defined(ALPHA) && !defined(__GNUC__))
-#    define GC_test_and_set(addr) __cxx_test_and_set_atomic(addr, 1)
+#    ifndef OSF1
+	--> We currently assume that if gcc is not used, we are
+	--> running under Tru64.
+#    endif
+#    include <machine/builtins.h>
+#    include <c_asm.h>
+#    define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
 #    define GC_TEST_AND_SET_DEFINED
+#    define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
+#    define GC_CLEAR_DEFINED
 #  endif
 #  if defined(MSWIN32)
 #    define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
 #    define GC_TEST_AND_SET_DEFINED
 #  endif
 #  ifdef MIPS
-#    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
+#    ifdef LINUX
+#      include <sys/tas.h>
+#      define GC_test_and_set(addr) _test_and_set((int *) addr,1)
+#      define GC_TEST_AND_SET_DEFINED
+#    elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
	|| !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
-#	 define GC_test_and_set(addr, v) test_and_set(addr,v)
+#	 ifdef __GNUC__
+#          define GC_test_and_set(addr) _test_and_set((void *)addr,1)
+#	 else
+#          define GC_test_and_set(addr) test_and_set((void *)addr,1)
+#	 endif
 #    else
-#	 define GC_test_and_set(addr, v) __test_and_set(addr,v)
+#	 define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
 #	 define GC_clear(addr) __lock_release(addr);
 #	 define GC_CLEAR_DEFINED
 #    endif
 #    define GC_TEST_AND_SET_DEFINED
 #  endif /* MIPS */
+#  if defined(_AIX)
+#    include <sys/atomic_op.h>
+#    if (defined(_POWER) || defined(_POWERPC))
+#      if defined(__GNUC__)
+        inline static void GC_memsync() {
+          __asm__ __volatile__ ("sync" : : : "memory");
+        }
+#      else
+#        ifndef inline
+#          define inline __inline
+#        endif
+#        pragma mc_func GC_memsync { \
+          "7c0004ac" /* sync (same opcode used for dcs)*/ \
+        }
+#      endif
+#    else
+#    error dont know how to memsync
+#    endif
+      inline static int GC_test_and_set(volatile unsigned int * addr) {
+        int oldvalue = 0;
+        if (compare_and_swap((void *)addr, &oldvalue, 1)) {
+          GC_memsync();
+          return 0;
+        } else return 1;
+      }
+#    define GC_TEST_AND_SET_DEFINED
+      inline static void GC_clear(volatile unsigned int *addr) {
+        GC_memsync();
+        *(addr) = 0;
+      }
+#    define GC_CLEAR_DEFINED
+
+#  endif
 #  if 0 /* defined(HP_PA) */
      /* The official recommendation seems to be to not use ldcw from	*/
      /* user mode.  Since multithreaded incremental collection doesn't	*/
@@ -236,7 +303,7 @@
      /* "set" means 0 and "clear" means 1 here.		*/
 #    define GC_test_and_set(addr) !GC_test_and_clear(addr);
 #    define GC_TEST_AND_SET_DEFINED
-#    define GC_clear(addr) GC_noop1(addr); *(volatile unsigned int *)addr = 1;
+#    define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
	/* The above needs a memory barrier! */
 #    define GC_CLEAR_DEFINED
 #  endif
@@ -260,8 +327,8 @@
 #    define USE_PTHREAD_LOCKS
 #  endif

-#  if defined(LINUX_THREADS) || defined(OSF1_THREADS) \
-     || defined(HPUX_THREADS)
+#  if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
+      && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
 #    define NO_THREAD (pthread_t)(-1)
 #    include <pthread.h>
 #    if defined(PARALLEL_MARK)
@@ -292,12 +359,12 @@
       {
	char result;
	__asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
-		: "=m"(*(addr)), "=r"(result)
-		: "r" (new_val), "0"(*(addr)), "a"(old) : "memory");
+		: "+m"(*(addr)), "=r"(result)
+		: "r" (new_val), "a"(old) : "memory");
	return (GC_bool) result;
       }
 #     endif /* !GENERIC_COMPARE_AND_SWAP */
-      inline static void GC_memory_write_barrier()
+      inline static void GC_memory_barrier()
       {
	/* We believe the processor ensures at least processor	*/
	/* consistent ordering.  Thus a compiler barrier	*/
@@ -305,6 +372,37 @@
	__asm__ __volatile__("" : : : "memory");
       }
 #   endif /* I386 */
+
+#   if defined(POWERPC)
+#    if !defined(GENERIC_COMPARE_AND_SWAP)
+      /* Returns TRUE if the comparison succeeded. */
+      inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+          GC_word old, GC_word new_val)
+      {
+        int result, dummy;
+        __asm__ __volatile__(
+            "1:\tlwarx %0,0,%5\n"
+            "\tcmpw %0,%4\n"
+            "\tbne 2f\n"
+            "\tstwcx. %3,0,%2\n"
+            "\tbne- 1b\n"
+            "\tsync\n"
+            "\tli %1, 1\n"
+            "\tb 3f\n"
+            "2:\tli %1, 0\n"
+            "3:\t\n"
+            : "=&r" (dummy), "=r" (result), "=p" (addr)
+            : "r" (new_val), "r" (old), "2"(addr)
+            : "cr0","memory");
+        return (GC_bool) result;
+      }
+#    endif /* !GENERIC_COMPARE_AND_SWAP */
+      inline static void GC_memory_barrier()
+      {
+        __asm__ __volatile__("sync" : : : "memory");
+      }
+#   endif /* POWERPC */
+
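All of the GC_compare_and_exchange implementations added in this revision (PowerPC here, Alpha and S390 below) share one contract: atomically replace *addr with new_val if and only if it still holds old, returning TRUE on success. The standard consumer is a load/retry loop, which is exactly how GC_atomic_add further down is built. A portable C11 sketch of that idiom, for illustration only; the type and function names are stand-ins:

    #include <stdatomic.h>

    typedef unsigned long word_t;          /* stand-in for GC_word */

    /* Returns the original value of *addr, like GC_atomic_add. */
    static word_t atomic_add(volatile _Atomic word_t *addr, word_t how_much)
    {
        word_t old = atomic_load(addr);
        /* If another thread changes *addr between the load and the    */
        /* CAS, the CAS fails, refreshes old, and we simply try again. */
        while (!atomic_compare_exchange_weak(addr, &old, old + how_much))
            ;                              /* old was reloaded; retry */
        return old;
    }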
 #   if defined(IA64)
 #     if !defined(GENERIC_COMPARE_AND_SWAP)
       inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
@@ -319,12 +417,73 @@
 #     endif /* !GENERIC_COMPARE_AND_SWAP */
 #     if 0
	/* Shouldn't be needed; we use volatile stores instead. */
-      inline static void GC_memory_write_barrier()
+      inline static void GC_memory_barrier()
       {
	__asm__ __volatile__("mf" : : : "memory");
       }
 #     endif /* 0 */
 #   endif /* IA64 */
+#   if defined(ALPHA)
+#    if !defined(GENERIC_COMPARE_AND_SWAP)
+#      if defined(__GNUC__)
+        inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                                      GC_word old, GC_word new_val)
+        {
+          unsigned long was_equal;
+          unsigned long temp;
+
+          __asm__ __volatile__(
+            "1:     ldq_l %0,%1\n"
+            "       cmpeq %0,%4,%2\n"
+            "       mov %3,%0\n"
+            "       beq %2,2f\n"
+            "       stq_c %0,%1\n"
+            "       beq %0,1b\n"
+            "2:\n"
+            "       mb\n"
+            : "=&r" (temp), "=m" (*addr), "=&r" (was_equal)
+            : "r" (new_val), "Ir" (old)
+            : "memory");
+          return was_equal;
+        }
+#      else /* !__GNUC__ */
+        inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                                      GC_word old, GC_word new_val)
+        {
+          return __CMP_STORE_QUAD(addr, old, new_val, addr);
+        }
+#      endif /* !__GNUC__ */
+#    endif /* !GENERIC_COMPARE_AND_SWAP */
+#    ifdef __GNUC__
+      inline static void GC_memory_barrier()
+      {
+        __asm__ __volatile__("mb" : : : "memory");
+      }
+#    else
+#      define GC_memory_barrier() asm("mb")
+#    endif /* !__GNUC__ */
+#   endif /* ALPHA */
+#   if defined(S390)
+#    if !defined(GENERIC_COMPARE_AND_SWAP)
+      inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                GC_word old, GC_word new_val)
+      {
+        int retval;
+        __asm__ __volatile__ (
+#        ifndef __s390x__
+          "     cs  %1,%2,0(%3)\n"
+#        else
+          "     csg %1,%2,0(%3)\n"
+#        endif
+          "     ipm %0\n"
+          "     srl %0,28\n"
+          : "=&d" (retval), "+d" (old)
+          : "d" (new_val), "a" (addr)
+          : "cc", "memory");
+        return retval == 0;
+      }
+#    endif
+#   endif
 #   if !defined(GENERIC_COMPARE_AND_SWAP)
      /* Returns the original value of *addr.	*/
      inline static GC_word GC_atomic_add(volatile GC_word *addr,
@@ -395,8 +554,12 @@
	{ GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
	  pthread_mutex_unlock(&GC_allocate_ml); }
 #     else /* !GC_ASSERTIONS */
+#      if defined(NO_PTHREAD_TRYLOCK)
+#        define LOCK() GC_lock();
+#      else /* !defined(NO_PTHREAD_TRYLOCK) */
 #      define LOCK() \
	   { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
+#      endif
 #      define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
 #     endif /* !GC_ASSERTIONS */
 #   endif /* USE_PTHREAD_LOCKS */
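The LOCK() added for the NO_PTHREAD_TRYLOCK case calls GC_lock() unconditionally; the default path first attempts pthread_mutex_trylock so that the common, uncontended acquisition costs a single atomic operation and the slow path runs only under contention. The pattern, sketched for illustration (slow_lock here stands in for GC_lock, which in the collector spins and backs off before blocking):

    #include <pthread.h>

    static pthread_mutex_t allocate_ml = PTHREAD_MUTEX_INITIALIZER;

    static void slow_lock(void)            /* stand-in for GC_lock() */
    {
        /* The real GC_lock spins briefly, then backs off or blocks. */
        pthread_mutex_lock(&allocate_ml);
    }

    static void lock(void)
    {
        /* Fast path: one atomic attempt.  The contended case falls  */
        /* through to the slow path.                                 */
        if (pthread_mutex_trylock(&allocate_ml) != 0)
            slow_lock();
    }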
@@ -411,14 +574,14 @@
 #   ifdef GC_ASSERTIONS
      extern pthread_t GC_mark_lock_holder;
 #   endif
-# endif /* LINUX_THREADS || OSF1_THREADS || HPUX_THREADS */
-# if defined(IRIX_THREADS)
+# endif /* GC_PTHREADS with linux_threads.c implementation */
+# if defined(GC_IRIX_THREADS)
+#   include <pthread.h>
    /* This probably should never be included, but I can't test	*/
    /* on Irix anymore.						*/
 #   include <mutex.h>
-    extern unsigned long GC_allocate_lock;
+    extern volatile unsigned int GC_allocate_lock;
	/* This is not a mutex because mutexes that obey the (optional)	*/
	/* POSIX scheduling rules are subject to convoys in high contention	*/
	/* applications.  This is basically a spin lock.		*/
@@ -430,7 +593,7 @@
 #   define NO_THREAD (pthread_t)(-1)
 #   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
 #   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
-#   define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
+#   define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
 #   define UNLOCK() GC_clear(&GC_allocate_lock);
    extern VOLATILE GC_bool GC_collecting;
 #   define ENTER_GC() \
@@ -438,12 +601,19 @@
	 GC_collecting = 1; \
	}
 #   define EXIT_GC() GC_collecting = 0;
-# endif /* IRIX_THREADS */
-# ifdef WIN32_THREADS
-#   include <windows.h>
-    GC_API CRITICAL_SECTION GC_allocate_ml;
-#   define LOCK() EnterCriticalSection(&GC_allocate_ml);
-#   define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
+# endif /* GC_IRIX_THREADS */
+# if defined(GC_WIN32_THREADS)
+#   if defined(GC_PTHREADS)
+#     include <pthread.h>
+     extern pthread_mutex_t GC_allocate_ml;
+#     define LOCK()   pthread_mutex_lock(&GC_allocate_ml)
+#     define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+#   else
+#     include <windows.h>
+     GC_API CRITICAL_SECTION GC_allocate_ml;
+#     define LOCK() EnterCriticalSection(&GC_allocate_ml);
+#     define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
+#   endif
 # endif
 # ifndef SET_LOCK_HOLDER
 #   define SET_LOCK_HOLDER()
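In the GC_IRIX_THREADS configuration the allocation lock is a bare word driven by the GC_test_and_set/GC_clear primitives from the first half of this file, with GC_lock() as the contended slow path. A simplified sketch of what that slow path looks like, for illustration only; the real GC_lock in the collector adds exponential backoff, and GC_lock_sketch is a hypothetical name:

    #include <sched.h>

    #define SPIN_MAX 1024

    extern volatile unsigned int GC_allocate_lock;

    void GC_lock_sketch(void)
    {
        unsigned spins = 0;
        /* GC_test_and_set returns nonzero while another thread holds */
        /* the lock; spin while that is cheap, then yield the CPU.    */
        while (GC_test_and_set(&GC_allocate_lock)) {
            if (++spins < SPIN_MAX) continue;
            sched_yield();
            spins = 0;
        }
    }

UNLOCK() pairs with this through GC_clear(), whose barrier ("eieio", "mb", or "sync" depending on the target) makes the critical section's writes visible before the lock word reads as free.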