"\tbne 2f\n" /* non-zero, return already set */ |
"\tbne 2f\n" /* non-zero, return already set */ |
"\tstwcx. %2,0,%1\n" /* else store conditional */ |
"\tstwcx. %2,0,%1\n" /* else store conditional */ |
"\tbne- 1b\n" /* retry if lost reservation */ |
"\tbne- 1b\n" /* retry if lost reservation */ |
|
"\tsync\n" /* import barrier */ |
"2:\t\n" /* oldval is zero if we set */ |
"2:\t\n" /* oldval is zero if we set */ |
: "=&r"(oldval), "=p"(addr) |
: "=&r"(oldval), "=p"(addr) |
: "r"(temp), "1"(addr) |
: "r"(temp), "1"(addr) |
: "memory"); |
: "cr0","memory"); |
return (int)oldval; |
return oldval; |
} |
} |
# define GC_TEST_AND_SET_DEFINED |
# define GC_TEST_AND_SET_DEFINED |
    inline static void GC_clear(volatile unsigned int *addr) {

      return oldvalue;
    }
#   define GC_TEST_AND_SET_DEFINED
    inline static void GC_clear(volatile unsigned int *addr) {
      __asm__ __volatile__("mb" : : : "memory");
      *(addr) = 0;
    }
#   define GC_CLEAR_DEFINED
#  endif /* ALPHA */
#  ifdef ARM32
    inline static int GC_test_and_set(volatile unsigned int *addr) {

    }
#   define GC_TEST_AND_SET_DEFINED
#  endif /* ARM32 */

#  ifdef S390
    inline static int GC_test_and_set(volatile unsigned int *addr) {
      int ret;
      /* Atomically exchange *addr with 1, retrying until the */
      /* compare-and-swap succeeds; returns the old value.    */
      __asm__ __volatile__ (
        "   l    %0,0(%2)\n"       /* ret = *addr (expected old value) */
        "0: cs   %0,%1,0(%2)\n"    /* if (*addr == ret) *addr = 1      */
        "   jl   0b"               /* else ret was reloaded; retry     */
        : "=&d" (ret)
        : "d" (1), "a" (addr)
        : "cc", "memory");
      return ret;
    }
#  endif
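#  if 0
    /* Illustrative sketch: each GC_test_and_set above returns the    */
    /* old value of the word, i.e. 0 exactly when the caller changed  */
    /* it from 0 to 1 and thus owns the lock.  A minimal spin lock    */
    /* built on these primitives; the name "my_lock" is hypothetical. */
    static volatile unsigned int my_lock = 0;

    static void my_lock_acquire(void) {
      while (GC_test_and_set(&my_lock)) {
        /* spin: old value was 1, so another thread holds the lock */
      }
    }

    static void my_lock_release(void) {
      GC_clear(&my_lock);   /* memory barrier, then store 0 */
    }
#  endif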
# endif /* __GNUC__ */
# if (defined(ALPHA) && !defined(__GNUC__))
#   ifndef OSF1
      --> We currently assume that if gcc is not used, we are
      --> running under Tru64.
#   endif
#   include <machine/builtins.h>
#   include <c_asm.h>
#   define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
#   define GC_TEST_AND_SET_DEFINED
#   define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
#   define GC_CLEAR_DEFINED
# endif
# if defined(MSWIN32)
#   define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)

#   elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
	|| !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#     ifdef __GNUC__
#       define GC_test_and_set(addr) _test_and_set((void *)addr,1)
#     else
#       define GC_test_and_set(addr) test_and_set((void *)addr,1)
#     endif
#   else
#     define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
#     define GC_clear(addr) __lock_release(addr);
#     define GC_CLEAR_DEFINED
#   endif
#   define GC_TEST_AND_SET_DEFINED
# endif /* MIPS */

# if defined(_AIX)
#   include <sys/atomic_op.h>
#   if (defined(_POWER) || defined(_POWERPC))
#     if defined(__GNUC__)
        inline static void GC_memsync() {
          __asm__ __volatile__ ("sync" : : : "memory");
        }
#     else
#       ifndef inline
#         define inline __inline
#       endif
#       pragma mc_func GC_memsync { \
          "7c0004ac" /* sync (same opcode used for dcs) */ \
        }
#     endif
#   else
#     error dont know how to memsync
#   endif
    inline static int GC_test_and_set(volatile unsigned int * addr) {
      int oldvalue = 0;
      /* Swap 0 -> 1; on success, issue a barrier and report the word */
      /* as set by us (return 0), matching the other ports above.     */
      if (compare_and_swap((void *)addr, &oldvalue, 1)) {
        GC_memsync();
        return 0;
      } else return 1;
    }
#   define GC_TEST_AND_SET_DEFINED
    inline static void GC_clear(volatile unsigned int *addr) {
      GC_memsync();   /* complete prior accesses before clearing */
      *(addr) = 0;
    }
#   define GC_CLEAR_DEFINED
# endif
# if 0 /* defined(HP_PA) */
    /* The official recommendation seems to be to not use ldcw from   */
    /* user mode.  Since multithreaded incremental collection doesn't */
    /* work anyway on HP_PA, this shouldn't be a major loss.          */

# endif /* HP_PA */
# if defined(I386)
#  if !defined(GENERIC_COMPARE_AND_SWAP)
    /* Returns TRUE if the comparison succeeded. */
    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                  GC_word old, GC_word new_val)
    {
      char result;
      /* LOCK-prefixed CMPXCHG: if *addr equals old (preloaded into */
      /* %eax via the "a" constraint), store new_val and set ZF;    */
      /* SETZ then turns ZF into the boolean result.                */
      __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                : "+m"(*(addr)), "=r"(result)
                : "r" (new_val), "a"(old) : "memory");
      return (GC_bool) result;
    }
#  endif /* !GENERIC_COMPARE_AND_SWAP */
    inline static void GC_memory_barrier()
    {
      /* We believe the processor ensures at least processor */
      /* consistent ordering.  Thus a compiler barrier       */
      /* suffices.                                           */
      __asm__ __volatile__("" : : : "memory");
    }
# endif /* I386 */
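#  if 0
    /* Illustrative sketch: a typical use of GC_compare_and_exchange */
    /* is to claim a one-shot transition; a TRUE return means this   */
    /* thread atomically moved the word from 0 to 1 and won the      */
    /* race.  All "my_*" names are hypothetical.                     */
    static volatile GC_word my_init_state = 0;

    static void my_init_once(void (*init_fn)(void)) {
      if (GC_compare_and_exchange(&my_init_state, 0, 1)) {
        init_fn();   /* only the winner runs the initializer */
      }
    }
#  endif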

# if defined(POWERPC)
#  if !defined(GENERIC_COMPARE_AND_SWAP)
    /* Returns TRUE if the comparison succeeded. */
    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                  GC_word old, GC_word new_val)
    {
      int result, dummy;
      __asm__ __volatile__(
        "1:\tlwarx %0,0,%5\n"     /* load *addr and reserve          */
        "\tcmpw %0,%4\n"          /* compare against old             */
        "\tbne 2f\n"              /* mismatch: fail without storing  */
        "\tstwcx. %3,0,%2\n"      /* store new_val if still reserved */
        "\tbne- 1b\n"             /* lost reservation: retry         */
        "\tsync\n"                /* import barrier                  */
        "\tli %1, 1\n"            /* result = TRUE                   */
        "\tb 3f\n"
        "2:\tli %1, 0\n"          /* result = FALSE                  */
        "3:\t\n"
        : "=&r" (dummy), "=r" (result), "=p" (addr)
        : "r" (new_val), "r" (old), "2"(addr)
        : "cr0","memory");
      return (GC_bool) result;
    }
#  endif /* !GENERIC_COMPARE_AND_SWAP */
    inline static void GC_memory_barrier()
    {
      __asm__ __volatile__("sync" : : : "memory");
    }
# endif /* POWERPC */
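#  if 0
    /* Illustrative sketch: GC_memory_barrier pairs a producer's */
    /* stores with a consumer's loads.  All "my_*" names below   */
    /* are hypothetical.                                         */
    static volatile GC_word my_data = 0;
    static volatile GC_word my_ready = 0;

    static void my_publish(GC_word v) {
      my_data = v;
      GC_memory_barrier();   /* payload must be visible before flag */
      my_ready = 1;
    }

    static GC_word my_consume(void) {
      while (!my_ready) { /* spin */ }
      GC_memory_barrier();   /* order flag load before data load */
      return my_data;
    }
#  endif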

# if defined(IA64)
#  if !defined(GENERIC_COMPARE_AND_SWAP)
    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                  GC_word old, GC_word new_val)

#  endif /* !GENERIC_COMPARE_AND_SWAP */
#  if 0
    /* Shouldn't be needed; we use volatile stores instead. */
    inline static void GC_memory_barrier()
    {
      __asm__ __volatile__("mf" : : : "memory");
    }
#  endif /* 0 */
# endif /* IA64 */

# if defined(ALPHA)
#  if !defined(GENERIC_COMPARE_AND_SWAP)
#   if defined(__GNUC__)
    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                  GC_word old, GC_word new_val)
    {
      unsigned long was_equal;
      unsigned long temp;

      __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"       /* load locked                */
        "       cmpeq %0,%4,%2\n"    /* was_equal = (*addr == old) */
        "       mov %3,%0\n"         /* temp = new_val             */
        "       beq %2,2f\n"         /* mismatch: skip the store   */
        "       stq_c %0,%1\n"       /* store conditional          */
        "       beq %0,1b\n"         /* lost reservation: retry    */
        "2:\n"
        "       mb\n"
        : "=&r" (temp), "=m" (*addr), "=&r" (was_equal)
        : "r" (new_val), "Ir" (old)
        : "memory");
      return was_equal;
    }
#   else /* !__GNUC__ */
    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                  GC_word old, GC_word new_val)
    {
      return __CMP_STORE_QUAD(addr, old, new_val, addr);
    }
#   endif /* !__GNUC__ */
#  endif /* !GENERIC_COMPARE_AND_SWAP */
#  ifdef __GNUC__
    inline static void GC_memory_barrier()
    {
      __asm__ __volatile__("mb" : : : "memory");
    }
#  else
#   define GC_memory_barrier() asm("mb")
#  endif /* !__GNUC__ */
# endif /* ALPHA */
# if defined(S390) |
|
# if !defined(GENERIC_COMPARE_AND_SWAP) |
|
inline static GC_bool GC_compare_and_exchange(volatile C_word *addr, |
|
GC_word old, GC_word new_val) |
|
{ |
|
int retval; |
|
__asm__ __volatile__ ( |
|
# ifndef __s390x__ |
|
" cs %1,%2,0(%3)\n" |
|
# else |
|
" csg %1,%2,0(%3)\n" |
|
# endif |
|
" ipm %0\n" |
|
" srl %0,28\n" |
|
: "=&d" (retval), "+d" (old) |
|
: "d" (new_val), "a" (addr) |
|
: "cc", "memory"); |
|
return retval == 0; |
|
} |
|
# endif |
|
# endif |
# if !defined(GENERIC_COMPARE_AND_SWAP)
    /* Returns the original value of *addr. */
    inline static GC_word GC_atomic_add(volatile GC_word *addr,
                                        GC_word how_much)

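#  if 0
    /* Minimal sketch of the body elided above, assuming the usual   */
    /* compare-and-swap retry loop; the parameter name "how_much" is */
    /* taken from the signature fragment and otherwise hypothetical. */
    inline static GC_word GC_atomic_add_sketch(volatile GC_word *addr,
                                               GC_word how_much)
    {
      GC_word old;
      do {
        old = *addr;
      } while (!GC_compare_and_exchange(addr, old, old + how_much));
      return old;   /* original value, per the comment above */
    }
#  endif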

#  define UNLOCK() \
     { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
       pthread_mutex_unlock(&GC_allocate_ml); }
# else /* !GC_ASSERTIONS */
#  if defined(NO_PTHREAD_TRYLOCK)
#   define LOCK() GC_lock();
#  else /* !defined(NO_PTHREAD_TRYLOCK) */
#   define LOCK() \
      { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#  endif
#  define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# endif /* !GC_ASSERTIONS */
# endif /* USE_PTHREAD_LOCKS */

    /* on Irix anymore. */
#   include <mutex.h>

    extern volatile unsigned int GC_allocate_lock;
    /* This is not a mutex because mutexes that obey the (optional)     */
    /* POSIX scheduling rules are subject to convoys in high contention */
    /* applications.  This is basically a spin lock.                    */
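#  if 0
    /* Illustrative sketch: with GC_allocate_lock a plain word, an  */
    /* uncontended acquire is a single test-and-set, and contended  */
    /* callers spin instead of blocking in the kernel, avoiding the */
    /* convoys described above.  The fallback "my_slow_lock" is     */
    /* hypothetical.                                                */
    extern void my_slow_lock(void);   /* spin/back off until acquired */

    static void my_allocate_lock_acquire(void) {
      if (GC_test_and_set(&GC_allocate_lock)) my_slow_lock();
    }

    static void my_allocate_lock_release(void) {
      GC_clear(&GC_allocate_lock);
    }
#  endif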