=================================================================== RCS file: /home/cvs/OpenXM_contrib/gc/Attic/gc_priv.h,v retrieving revision 1.1.1.2 retrieving revision 1.1.1.3 diff -u -p -r1.1.1.2 -r1.1.1.3 --- OpenXM_contrib/gc/Attic/gc_priv.h 2000/04/14 11:07:59 1.1.1.2 +++ OpenXM_contrib/gc/Attic/gc_priv.h 2000/12/01 14:48:26 1.1.1.3 @@ -44,7 +44,7 @@ typedef GC_word word; typedef GC_signed_word signed_word; -# ifndef CONFIG_H +# ifndef GCCONFIG_H # include "gcconfig.h" # endif @@ -82,6 +82,7 @@ typedef char * ptr_t; /* A generic pointer to which we # define GC_FAR #endif + /*********************************/ /* */ /* Definitions for conservative */ @@ -173,15 +174,6 @@ typedef char * ptr_t; /* A generic pointer to which we /* May save significant amounts of space for obj_map */ /* entries. */ -#ifndef OLD_BLOCK_ALLOC - /* Macros controlling large block allocation strategy. */ -# define EXACT_FIRST /* Make a complete pass through the large object */ - /* free list before splitting a block */ -# define PRESERVE_LAST /* Do not divide last allocated heap segment */ - /* unless we would otherwise need to expand the */ - /* heap. */ -#endif - /* ALIGN_DOUBLE requires MERGE_SIZES at present. */ # if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES) # define MERGE_SIZES @@ -281,6 +273,13 @@ void GC_print_callers (/* struct callinfo info[NFRAMES # define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \ + (double) (a.tv_usec - b.tv_usec) / 1000.0) #else /* !BSD_TIME */ +# ifdef MSWIN32 +# include +# include +# define CLOCK_TYPE DWORD +# define GET_TIME(x) x = GetTickCount() +# define MS_TIME_DIFF(a,b) ((long)((a)-(b))) +# else /* !MSWIN32, !BSD_TIME */ # include # if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4) clock_t clock(); /* Not in time.h, where it belongs */ @@ -306,6 +305,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES # define GET_TIME(x) x = clock() # define MS_TIME_DIFF(a,b) ((unsigned long) \ (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC)) +# endif /* !MSWIN32 */ #endif /* !BSD_TIME */ /* We use bzero and bcopy internally. They may not be available. */ @@ -437,8 +437,11 @@ void GC_print_callers (/* struct callinfo info[NFRAMES # define LOCK() mutex_lock(&GC_allocate_ml); # define UNLOCK() mutex_unlock(&GC_allocate_ml); # endif -# ifdef LINUX_THREADS +# if defined(LINUX_THREADS) +# if defined(I386)|| defined(POWERPC) || defined(ALPHA) || defined(IA64) \ + || defined(M68K) # include +# define USE_SPIN_LOCK # if defined(I386) inline static int GC_test_and_set(volatile unsigned int *addr) { int oldval; @@ -448,9 +451,38 @@ void GC_print_callers (/* struct callinfo info[NFRAMES : "0"(1), "m"(*(addr))); return oldval; } -# else -# if defined(POWERPC) +# endif +# if defined(IA64) inline static int GC_test_and_set(volatile unsigned int *addr) { + int oldval; + __asm__ __volatile__("xchg4 %0=%1,%2" + : "=r"(oldval), "=m"(*addr) + : "r"(1), "1"(*addr)); + return oldval; + } + inline static void GC_clear(volatile unsigned int *addr) { + __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr)); + } +# define GC_CLEAR_DEFINED +# endif +# ifdef M68K + /* Contributed by Tony Mantler. I'm not sure how well it was */ + /* tested. */ + inline static int GC_test_and_set(volatile unsigned int *addr) { + char oldval; /* this must be no longer than 8 bits */ + + /* The return value is semi-phony. 
*/ + /* 'tas' sets bit 7 while the return */ + /* value pretends bit 0 was set */ + __asm__ __volatile__( + "tas %1@; sne %0; negb %0" + : "=d" (oldval) + : "a" (addr)); + return oldval; + } +# endif +# if defined(POWERPC) + inline static int GC_test_and_set(volatile unsigned int *addr) { int oldval; int temp = 1; // locked value @@ -465,46 +497,61 @@ void GC_print_callers (/* struct callinfo info[NFRAMES : "r"(temp), "1"(addr) : "memory"); return (int)oldval; - } -# else -# ifdef ALPHA - inline static int GC_test_and_set(volatile unsigned int * -addr) - { - unsigned long oldvalue; - unsigned long temp; + } + inline static void GC_clear(volatile unsigned int *addr) { + __asm__ __volatile__("eieio"); + *(addr) = 0; + } +# define GC_CLEAR_DEFINED +# endif +# ifdef ALPHA + inline static int GC_test_and_set(volatile unsigned int * addr) + { + unsigned long oldvalue; + unsigned long temp; - __asm__ __volatile__( - "1: ldl_l %0,%1\n" - " and %0,%3,%2\n" - " bne %2,2f\n" - " xor %0,%3,%0\n" - " stl_c %0,%1\n" - " beq %0,3f\n" - " mb\n" - "2:\n" - ".section .text2,\"ax\"\n" - "3: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (*addr), "=&r" -(oldvalue) - :"Ir" (1), "m" (*addr)); + __asm__ __volatile__( + "1: ldl_l %0,%1\n" + " and %0,%3,%2\n" + " bne %2,2f\n" + " xor %0,%3,%0\n" + " stl_c %0,%1\n" + " beq %0,3f\n" + " mb\n" + "2:\n" + ".section .text2,\"ax\"\n" + "3: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue) + :"Ir" (1), "m" (*addr)); - return oldvalue; - } -# else - -- > Need implementation of GC_test_and_set() -# endif -# endif + return oldvalue; + } + /* Should probably also define GC_clear, since it needs */ + /* a memory barrier ?? */ +# endif /* ALPHA */ +# ifdef ARM32 + inline static int GC_test_and_set(volatile unsigned int *addr) { + int oldval; + /* SWP on ARM is very similar to XCHG on x86. Doesn't lock the + * bus because there are no SMP ARM machines. If/when there are, + * this code will likely need to be updated. */ + /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */ + __asm__ __volatile__("swp %0, %1, [%2]" + : "=r"(oldval) + : "r"(1), "r"(addr)); + return oldval; + } # endif - inline static void GC_clear(volatile unsigned int *addr) { +# ifndef GC_CLEAR_DEFINED + inline static void GC_clear(volatile unsigned int *addr) { + /* Try to discourage gcc from moving anything past this. */ + __asm__ __volatile__(" "); *(addr) = 0; - } + } +# endif extern volatile unsigned int GC_allocate_lock; - /* This is not a mutex because mutexes that obey the (optional) */ - /* POSIX scheduling rules are subject to convoys in high contention */ - /* applications. This is basically a spin lock. */ extern pthread_t GC_lock_holder; extern void GC_lock(void); /* Allocation lock holder. Only set if acquired by client through */ @@ -517,12 +564,19 @@ addr) { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); } # define UNLOCK() \ GC_clear(&GC_allocate_lock) - extern GC_bool GC_collecting; + extern VOLATILE GC_bool GC_collecting; # define ENTER_GC() \ { \ GC_collecting = 1; \ } # define EXIT_GC() GC_collecting = 0; +# else /* LINUX_THREADS on hardware for which we don't know how */ + /* to do test and set. 
*/ +# include + extern pthread_mutex_t GC_allocate_ml; +# define LOCK() pthread_mutex_lock(&GC_allocate_ml) +# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml) +# endif # endif /* LINUX_THREADS */ # if defined(HPUX_THREADS) # include @@ -581,7 +635,7 @@ addr) *(volatile unsigned long *)(&GC_allocate_lock) = 0; } # endif # endif - extern GC_bool GC_collecting; + extern VOLATILE GC_bool GC_collecting; # define ENTER_GC() \ { \ GC_collecting = 1; \ @@ -957,8 +1011,10 @@ struct hblk { /* The type of mark procedures. This really belongs in gc_mark.h. */ /* But we put it here, so that we can avoid scanning the mark proc */ /* table. */ -typedef struct ms_entry * (*mark_proc)(/* word * addr, mark_stack_ptr, - mark_stack_limit, env */); +typedef struct ms_entry * (*mark_proc)(/* word * addr, + struct ms_entry *mark_stack_ptr, + struct ms_entry *mark_stack_limit, + word env */); # define LOG_MAX_MARK_PROCS 6 # define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS) @@ -1035,6 +1091,7 @@ struct roots { struct _GC_arrays { word _heapsize; word _max_heapsize; + word _requested_heapsize; /* Heap size due to explicit expansion */ ptr_t _last_heap_addr; ptr_t _prev_heap_addr; word _large_free_bytes; @@ -1059,6 +1116,10 @@ struct _GC_arrays { word _mem_freed; /* Number of explicitly deallocated words of memory */ /* since last collection. */ + ptr_t _scratch_end_ptr; + ptr_t _scratch_last_end_ptr; + /* Used by headers.c, and can easily appear to point to */ + /* heap. */ mark_proc _mark_procs[MAX_MARK_PROCS]; /* Table of user-defined mark procedures. There is */ /* a small number of these, which can be referenced */ @@ -1223,9 +1284,12 @@ GC_API GC_FAR struct _GC_arrays GC_arrays; # define GC_words_finalized GC_arrays._words_finalized # define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc # define GC_mem_freed GC_arrays._mem_freed +# define GC_scratch_end_ptr GC_arrays._scratch_end_ptr +# define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr # define GC_mark_procs GC_arrays._mark_procs # define GC_heapsize GC_arrays._heapsize # define GC_max_heapsize GC_arrays._max_heapsize +# define GC_requested_heapsize GC_arrays._requested_heapsize # define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc # define GC_heap_sects GC_arrays._heap_sects # define GC_last_stack GC_arrays._last_stack @@ -1260,6 +1324,8 @@ GC_API GC_FAR struct _GC_arrays GC_arrays; # define beginGC_arrays ((ptr_t)(&GC_arrays)) # define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays)) +#define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes) + /* Object kinds: */ # define MAXOBJKINDS 16 @@ -1339,7 +1405,7 @@ extern GC_bool GC_objects_are_marked; /* There are mar extern GC_bool GC_incremental; /* Using incremental/generational collection. */ #else -# define GC_incremental TRUE +# define GC_incremental FALSE /* Hopefully allow optimizer to remove some code. */ #endif @@ -1392,10 +1458,7 @@ extern ptr_t GC_greatest_plausible_heap_addr; ptr_t GC_approx_sp(); GC_bool GC_should_collect(); -#ifdef PRESERVE_LAST - GC_bool GC_in_last_heap_sect(/* ptr_t */); - /* In last added heap section? If so, avoid breaking up. */ -#endif + void GC_apply_to_all_blocks(/*fn, client_data*/); /* Invoke fn(hbp, client_data) for each */ /* allocated heap block. */ @@ -1672,9 +1735,10 @@ ptr_t GC_allocobj(/* sz_inn_words, kind */); /* head. */ void GC_init_headers(); -GC_bool GC_install_header(/*h*/); +struct hblkhdr * GC_install_header(/*h*/); /* Install a header for block h. */ - /* Return FALSE on failure. 
*/
+				/* Return 0 on failure, or the header	*/
+				/* otherwise.				*/
 GC_bool GC_install_counts(/*h, sz*/);
 				/* Set up forwarding counts for block	*/
 				/* h of size sz.			*/
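
The new MSWIN32 branch of the timing macros above measures intervals with GetTickCount() and reports differences in milliseconds. A minimal sketch of how CLOCK_TYPE, GET_TIME, and MS_TIME_DIFF are meant to be combined; the variable names and the Sleep() call are illustrative and not taken from the collector:

/* Windows-only sketch of the MSWIN32 timing macros added above. */
#include <windows.h>
#include <stdio.h>

#define CLOCK_TYPE DWORD
#define GET_TIME(x) x = GetTickCount()
#define MS_TIME_DIFF(a,b) ((long)((a)-(b)))

int main(void)
{
    CLOCK_TYPE start_time, done_time;

    GET_TIME(start_time);
    Sleep(50);                      /* stands in for a collection cycle */
    GET_TIME(done_time);
    printf("pause took %ld msecs\n", MS_TIME_DIFF(done_time, start_time));
    return 0;
}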
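
When USE_SPIN_LOCK is defined, the LINUX_THREADS LOCK()/UNLOCK() macros reduce to a test-and-set spin lock on GC_allocate_lock. The sketch below is a self-contained illustration of that pattern, x86-only because it reuses the I386 sequence from the patch; spin_acquire() is a naive stand-in for GC_lock(), whose real backoff logic is not part of this diff:

/* Illustrative test-and-set spin lock in the style of the LINUX_THREADS */
/* code above.  Not the collector's actual implementation.               */
#include <stdio.h>

static volatile unsigned int allocate_lock = 0;  /* plays the role of GC_allocate_lock */

/* x86 test-and-set, as in the I386 case in the patch. */
static int test_and_set(volatile unsigned int *addr)
{
    int oldval;
    __asm__ __volatile__("xchgl %0, %1"
                         : "=r"(oldval), "=m"(*addr)
                         : "0"(1), "m"(*addr));
    return oldval;
}

/* Release the lock; the empty asm discourages the compiler from moving */
/* the store past this point, as in the generic GC_clear above.         */
static void clear(volatile unsigned int *addr)
{
    __asm__ __volatile__(" ");
    *addr = 0;
}

/* Naive stand-in for GC_lock(): busy-wait until the lock is free. */
static void spin_acquire(volatile unsigned int *addr)
{
    while (test_and_set(addr)) { /* spin */ }
}

#define LOCK()   { if (test_and_set(&allocate_lock)) spin_acquire(&allocate_lock); }
#define UNLOCK() clear(&allocate_lock)

int main(void)
{
    LOCK();
    printf("allocation lock held\n");
    UNLOCK();
    return 0;
}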
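
On LINUX_THREADS targets with no known test-and-set sequence, the patch falls back to a pthread mutex for the allocation lock. A minimal sketch of that fallback; the static initializer here stands in for however GC_allocate_ml is actually set up in the collector:

/* Sketch of the pthread_mutex fallback for architectures without a */
/* test-and-set implementation.                                      */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t allocate_ml = PTHREAD_MUTEX_INITIALIZER;

#define LOCK()   pthread_mutex_lock(&allocate_ml)
#define UNLOCK() pthread_mutex_unlock(&allocate_ml)

int main(void)
{
    LOCK();
    printf("allocation lock held (mutex fallback)\n");
    UNLOCK();
    return 0;
}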
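
The mark_proc typedef now spells out its full argument list in the comment. Written as an ANSI prototype, that comment corresponds to roughly the following; this is a sketch derived from the comment, with a stand-in definition of word, not a quote from gc_mark.h:

/* ANSI-style reading of the mark_proc comment above (sketch only). */
typedef unsigned long word;              /* stand-in for the collector's GC_word */
struct ms_entry;                         /* opaque mark stack entry              */

typedef struct ms_entry * (*mark_proc)(word * addr,
                                       struct ms_entry * mark_stack_ptr,
                                       struct ms_entry * mark_stack_limit,
                                       word env);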
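
GC_install_header now returns the installed header (0 on failure) rather than a GC_bool, so a caller can test and use the result in one step. A hedged caller-side sketch; setup_block and its body are invented for illustration and do not appear in headers.c:

/* Illustrative caller of the new GC_install_header interface. */
struct hblk;                             /* heap block, declared in gc_priv.h   */
struct hblkhdr;                          /* block header, declared in gc_priv.h */
struct hblkhdr * GC_install_header(/*h*/);

static int setup_block(struct hblk * h)
{
    struct hblkhdr * hhdr = GC_install_header(h);

    if (0 == hhdr) return 0;             /* header allocation failed */
    /* ... initialize *hhdr here, instead of doing a separate header lookup ... */
    return 1;
}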