===================================================================
RCS file: /home/cvs/OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -p -r1.3 -r1.4
--- OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h	2002/07/24 08:00:20	1.3
+++ OpenXM_contrib2/asir2000/gc/include/private/gc_locks.h	2003/06/24 05:11:41	1.4
@@ -153,11 +153,12 @@
           "\tbne 2f\n"                  /* non-zero, return already set */
           "\tstwcx. %2,0,%1\n"          /* else store conditional */
           "\tbne- 1b\n"                 /* retry if lost reservation */
+          "\tsync\n"                    /* import barrier */
           "2:\t\n"                      /* oldval is zero if we set */
           : "=&r"(oldval), "=p"(addr)
           : "r"(temp), "1"(addr)
-          : "memory");
-      return (int)oldval;
+          : "cr0","memory");
+      return oldval;
     }
 #   define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
@@ -191,8 +192,11 @@
       return oldvalue;
     }
 #   define GC_TEST_AND_SET_DEFINED
-    /* Should probably also define GC_clear, since it needs   */
-    /* a memory barrier ??                                     */
+    inline static void GC_clear(volatile unsigned int *addr) {
+      __asm__ __volatile__("mb" : : : "memory");
+      *(addr) = 0;
+    }
+#   define GC_CLEAR_DEFINED
 # endif /* ALPHA */
 # ifdef ARM32
     inline static int GC_test_and_set(volatile unsigned int *addr) {
@@ -209,10 +213,31 @@
     }
 #   define GC_TEST_AND_SET_DEFINED
 # endif /* ARM32 */
+# ifdef S390
+    inline static int GC_test_and_set(volatile unsigned int *addr) {
+      int ret;
+      __asm__ __volatile__ (
+        "   l     %0,0(%2)\n"
+        "0: cs    %0,%1,0(%2)\n"
+        "   jl    0b"
+        : "=&d" (ret)
+        : "d" (1), "a" (addr)
+        : "cc", "memory");
+      return ret;
+    }
+# endif
 # endif /* __GNUC__ */
 # if (defined(ALPHA) && !defined(__GNUC__))
-#   define GC_test_and_set(addr) __cxx_test_and_set_atomic(addr, 1)
+#   ifndef OSF1
+	--> We currently assume that if gcc is not used, we are
+	--> running under Tru64.
+#   endif
+#   include <machine/builtins.h>
+#   include <c_asm.h>
+#   define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
 #   define GC_TEST_AND_SET_DEFINED
+#   define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
+#   define GC_CLEAR_DEFINED
 # endif
 # if defined(MSWIN32)
 #   define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
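The hunks above all implement one contract: GC_test_and_set() atomically
stores 1 and returns the previous value (0 means the caller now holds the
lock), with an acquire barrier on success -- that is what the added "sync"
after a successful stwcx. provides -- and GC_clear() issues a release
barrier before storing 0.  A minimal sketch of that contract, assuming the
GCC 4.1+ __sync builtins instead of the per-architecture asm; the sketch_*
names are illustrative and not part of the patch:

#include <assert.h>

static volatile unsigned int lock_word = 0;

/* Returns the previous value; 0 means we acquired the lock. */
static int sketch_test_and_set(volatile unsigned int *addr)
{
    /* Acquire barrier, like the "sync" after a successful stwcx. above. */
    return (int)__sync_lock_test_and_set(addr, 1);
}

static void sketch_clear(volatile unsigned int *addr)
{
    /* Release barrier before the store, like GC_clear's "mb"/"sync". */
    __sync_lock_release(addr);
}

int main(void)
{
    assert(sketch_test_and_set(&lock_word) == 0);  /* acquired */
    assert(sketch_test_and_set(&lock_word) != 0);  /* already held */
    sketch_clear(&lock_word);
    assert(sketch_test_and_set(&lock_word) == 0);  /* acquired again */
    return 0;
}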
@@ -226,17 +251,50 @@
 #  elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
       || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
 #   ifdef __GNUC__
-#     define GC_test_and_set(addr) _test_and_set(addr,1)
+#     define GC_test_and_set(addr) _test_and_set((void *)addr,1)
 #   else
-#     define GC_test_and_set(addr) test_and_set(addr,1)
+#     define GC_test_and_set(addr) test_and_set((void *)addr,1)
 #   endif
 #  else
-#    define GC_test_and_set(addr) __test_and_set(addr,1)
+#    define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
 #    define GC_clear(addr) __lock_release(addr);
 #    define GC_CLEAR_DEFINED
 #  endif
 #  define GC_TEST_AND_SET_DEFINED
 # endif /* MIPS */
+# if defined(_AIX)
+#   include <sys/atomic_op.h>
+#   if (defined(_POWER) || defined(_POWERPC))
+#     if defined(__GNUC__)
+       inline static void GC_memsync() {
+         __asm__ __volatile__ ("sync" : : : "memory");
+       }
+#     else
+#       ifndef inline
+#         define inline __inline
+#       endif
+#       pragma mc_func GC_memsync { \
+          "7c0004ac" /* sync (same opcode used for dcs)*/ \
+        }
+#     endif
+#   else
+#     error dont know how to memsync
+#   endif
+    inline static int GC_test_and_set(volatile unsigned int * addr) {
+      int oldvalue = 0;
+      if (compare_and_swap((void *)addr, &oldvalue, 1)) {
+        GC_memsync();
+        return 0;
+      } else return 1;
+    }
+#  define GC_TEST_AND_SET_DEFINED
+    inline static void GC_clear(volatile unsigned int *addr) {
+      GC_memsync();
+      *(addr) = 0;
+    }
+#  define GC_CLEAR_DEFINED
+
+# endif
 # if 0 /* defined(HP_PA) */
     /* The official recommendation seems to be to not use ldcw from    */
     /* user mode.  Since multithreaded incremental collection doesn't  */
@@ -301,12 +359,12 @@
     {
       char result;
       __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
-          : "=m"(*(addr)), "=r"(result)
-          : "r" (new_val), "0"(*(addr)), "a"(old) : "memory");
+          : "+m"(*(addr)), "=r"(result)
+          : "r" (new_val), "a"(old) : "memory");
       return (GC_bool) result;
     }
 #  endif /* !GENERIC_COMPARE_AND_SWAP */
-  inline static void GC_memory_write_barrier()
+  inline static void GC_memory_barrier()
   {
     /* We believe the processor ensures at least processor  */
     /* consistent ordering.  Thus a compiler barrier        */
@@ -314,6 +372,37 @@
     __asm__ __volatile__("" : : : "memory");
   }
 # endif /* I386 */
+
+# if defined(POWERPC)
+#  if !defined(GENERIC_COMPARE_AND_SWAP)
+    /* Returns TRUE if the comparison succeeded. */
+    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+        GC_word old, GC_word new_val)
+    {
+      int result, dummy;
+      __asm__ __volatile__(
+        "1:\tlwarx %0,0,%5\n"
+        "\tcmpw %0,%4\n"
+        "\tbne 2f\n"
+        "\tstwcx. %3,0,%2\n"
+        "\tbne- 1b\n"
+        "\tsync\n"
+        "\tli %1, 1\n"
+        "\tb 3f\n"
+        "2:\tli %1, 0\n"
+        "3:\t\n"
+        : "=&r" (dummy), "=r" (result), "=p" (addr)
+        : "r" (new_val), "r" (old), "2"(addr)
+        : "cr0","memory");
+      return (GC_bool) result;
+    }
+#  endif /* !GENERIC_COMPARE_AND_SWAP */
+    inline static void GC_memory_barrier()
+    {
+      __asm__ __volatile__("sync" : : : "memory");
+    }
+# endif /* POWERPC */
+
 # if defined(IA64)
 #  if !defined(GENERIC_COMPARE_AND_SWAP)
     inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
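The POWERPC GC_compare_and_exchange() added above loops on lwarx/stwcx.
until the reservation holds, returns TRUE only if *addr contained old and
was replaced by new_val, and runs "sync" on the success path.  A portable
sketch of the same contract, assuming GCC's __sync_bool_compare_and_swap
builtin; it is illustrative only and not part of the patch:

#include <assert.h>

typedef unsigned long word_t;   /* stands in for GC_word */

/* Returns nonzero iff *addr contained old and now contains new_val. */
static int sketch_compare_and_exchange(volatile word_t *addr,
                                       word_t old, word_t new_val)
{
    /* Full barrier; at least as strong as the "sync" used above. */
    return __sync_bool_compare_and_swap(addr, old, new_val);
}

int main(void)
{
    volatile word_t w = 5;
    assert(sketch_compare_and_exchange(&w, 5, 7));   /* succeeds */
    assert(w == 7);
    assert(!sketch_compare_and_exchange(&w, 5, 9));  /* fails: w != 5 */
    assert(w == 7);
    return 0;
}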
@@ -328,12 +417,73 @@
 #  endif /* !GENERIC_COMPARE_AND_SWAP */
 #  if 0
     /* Shouldn't be needed; we use volatile stores instead. */
-    inline static void GC_memory_write_barrier()
+    inline static void GC_memory_barrier()
     {
       __asm__ __volatile__("mf" : : : "memory");
     }
 #  endif /* 0 */
 # endif /* IA64 */
+# if defined(ALPHA)
+#  if !defined(GENERIC_COMPARE_AND_SWAP)
+#   if defined(__GNUC__)
+    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                                  GC_word old, GC_word new_val)
+    {
+      unsigned long was_equal;
+      unsigned long temp;
+
+      __asm__ __volatile__(
+        "1:     ldq_l %0,%1\n"
+        "       cmpeq %0,%4,%2\n"
+        "       mov %3,%0\n"
+        "       beq %2,2f\n"
+        "       stq_c %0,%1\n"
+        "       beq %0,1b\n"
+        "2:\n"
+        "       mb\n"
+        : "=&r" (temp), "=m" (*addr), "=&r" (was_equal)
+        : "r" (new_val), "Ir" (old)
+        : "memory");
+      return was_equal;
+    }
+#   else /* !__GNUC__ */
+    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                                  GC_word old, GC_word new_val)
+    {
+      return __CMP_STORE_QUAD(addr, old, new_val, addr);
+    }
+#   endif /* !__GNUC__ */
+#  endif /* !GENERIC_COMPARE_AND_SWAP */
+#  ifdef __GNUC__
+    inline static void GC_memory_barrier()
+    {
+      __asm__ __volatile__("mb" : : : "memory");
+    }
+#  else
+#   define GC_memory_barrier() asm("mb")
+#  endif /* !__GNUC__ */
+# endif /* ALPHA */
+# if defined(S390)
+#  if !defined(GENERIC_COMPARE_AND_SWAP)
+    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
+                                                  GC_word old, GC_word new_val)
+    {
+      int retval;
+      __asm__ __volatile__ (
+#       ifndef __s390x__
+          "     cs  %1,%2,0(%3)\n"
+#       else
+          "     csg %1,%2,0(%3)\n"
+#       endif
+        "       ipm %0\n"
+        "       srl %0,28\n"
+        : "=&d" (retval), "+d" (old)
+        : "d" (new_val), "a" (addr)
+        : "cc", "memory");
+      return retval == 0;
+    }
+#  endif
+# endif
 # if !defined(GENERIC_COMPARE_AND_SWAP)
     /* Returns the original value of *addr. */
     inline static GC_word GC_atomic_add(volatile GC_word *addr,
@@ -404,8 +554,12 @@
      { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
        pthread_mutex_unlock(&GC_allocate_ml); }
 #  else /* !GC_ASSERTIONS */
+#   if defined(NO_PTHREAD_TRYLOCK)
+#    define LOCK() GC_lock();
+#   else /* !defined(NO_PTHREAD_TRYLOCK) */
 #   define LOCK() \
       { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
+#   endif
 #   define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
 #  endif /* !GC_ASSERTIONS */
 # endif /* USE_PTHREAD_LOCKS */
@@ -427,7 +581,7 @@
     /* on Irix anymore. */
 #  include <abi_mutex.h>
 
-   extern unsigned long GC_allocate_lock;
+   extern volatile unsigned int GC_allocate_lock;
     /* This is not a mutex because mutexes that obey the (optional)     */
    /* POSIX scheduling rules are subject to convoys in high contention */
    /* applications.  This is basically a spin lock.                    */
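The locking hunks at the end show the fast-path/slow-path split: unless
NO_PTHREAD_TRYLOCK is defined, LOCK() tries the mutex once and calls the
slow path GC_lock() only under contention.  A self-contained sketch of
that pattern, assuming POSIX threads; alloc_ml and slow_lock() are
illustrative stand-ins for GC_allocate_ml and GC_lock():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t alloc_ml = PTHREAD_MUTEX_INITIALIZER;

static void slow_lock(void)
{
    /* The real GC_lock() spins and backs off before blocking;
       blocking directly keeps the sketch short. */
    pthread_mutex_lock(&alloc_ml);
}

#define LOCK() \
    { if (0 != pthread_mutex_trylock(&alloc_ml)) slow_lock(); }
#define UNLOCK() pthread_mutex_unlock(&alloc_ml)

int main(void)
{
    LOCK();
    printf("in the critical section\n");
    UNLOCK();
    return 0;
}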