version 1.3, 2000/12/01 09:26:11 |
version 1.6, 2002/07/24 08:00:10 |
|
|
/* |
/* |
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers |
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers |
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. |
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. |
|
* Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved. |
* |
* |
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED |
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED |
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK. |
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK. |
|
|
|
|
|
|
# include <stdio.h> |
# include <stdio.h> |
# include "gc_priv.h" |
# include "private/gc_pmark.h" |
# include "gc_mark.h" |
|
|
|
/* We put this here to minimize the risk of inlining. */ |
/* We put this here to minimize the risk of inlining. */ |
/*VARARGS*/ |
/*VARARGS*/ |
Line 46 word GC_n_mark_procs = GC_RESERVED_MARK_PROCS; |
|
Line 46 word GC_n_mark_procs = GC_RESERVED_MARK_PROCS; |
|
/* It's done here, since we need to deal with mark descriptors. */ |
/* It's done here, since we need to deal with mark descriptors. */ |
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = { |
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = { |
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */, |
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */, |
0 | DS_LENGTH, FALSE, FALSE }, |
0 | GC_DS_LENGTH, FALSE, FALSE }, |
/* NORMAL */ { &GC_objfreelist[0], 0, |
/* NORMAL */ { &GC_objfreelist[0], 0, |
# if defined(ADD_BYTE_AT_END) && ALIGNMENT > DS_TAGS |
0 | GC_DS_LENGTH, /* Adjusted in GC_init_inner for EXTRA_BYTES */ |
(word)(-ALIGNMENT) | DS_LENGTH, |
|
# else |
|
0 | DS_LENGTH, |
|
# endif |
|
TRUE /* add length to descr */, TRUE }, |
TRUE /* add length to descr */, TRUE }, |
/* UNCOLLECTABLE */ |
/* UNCOLLECTABLE */ |
{ &GC_uobjfreelist[0], 0, |
{ &GC_uobjfreelist[0], 0, |
0 | DS_LENGTH, TRUE /* add length to descr */, TRUE }, |
0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE }, |
# ifdef ATOMIC_UNCOLLECTABLE |
# ifdef ATOMIC_UNCOLLECTABLE |
/* AUNCOLLECTABLE */ |
/* AUNCOLLECTABLE */ |
{ &GC_auobjfreelist[0], 0, |
{ &GC_auobjfreelist[0], 0, |
0 | DS_LENGTH, FALSE /* add length to descr */, FALSE }, |
0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE }, |
# endif |
# endif |
# ifdef STUBBORN_ALLOC |
# ifdef STUBBORN_ALLOC |
/*STUBBORN*/ { &GC_sobjfreelist[0], 0, |
/*STUBBORN*/ { &GC_sobjfreelist[0], 0, |
0 | DS_LENGTH, TRUE /* add length to descr */, TRUE }, |
0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE }, |
# endif |
# endif |
}; |
}; |
|
|
Line 104 word GC_n_rescuing_pages; /* Number of dirty pages we |
|
Line 100 word GC_n_rescuing_pages; /* Number of dirty pages we |
|
|
|
mse * GC_mark_stack; |
mse * GC_mark_stack; |
|
|
|
mse * GC_mark_stack_limit; |
|
|
word GC_mark_stack_size = 0; |
word GC_mark_stack_size = 0; |
|
|
mse * GC_mark_stack_top; |
#ifdef PARALLEL_MARK |
|
mse * VOLATILE GC_mark_stack_top; |
|
#else |
|
mse * GC_mark_stack_top; |
|
#endif |
|
|
static struct hblk * scan_ptr; |
static struct hblk * scan_ptr; |
|
|
Line 129 GC_bool GC_collection_in_progress() |
|
Line 131 GC_bool GC_collection_in_progress() |
|
void GC_clear_hdr_marks(hhdr) |
void GC_clear_hdr_marks(hhdr) |
register hdr * hhdr; |
register hdr * hhdr; |
{ |
{ |
BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word)); |
# ifdef USE_MARK_BYTES |
|
BZERO(hhdr -> hb_marks, MARK_BITS_SZ); |
|
# else |
|
BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word)); |
|
# endif |
} |
} |
|
|
/* Set all mark bits in the header. Used for uncollectable blocks. */ |
/* Set all mark bits in the header. Used for uncollectable blocks. */ |
Line 139 register hdr * hhdr; |
|
Line 145 register hdr * hhdr; |
|
register int i; |
register int i; |
|
|
for (i = 0; i < MARK_BITS_SZ; ++i) { |
for (i = 0; i < MARK_BITS_SZ; ++i) { |
|
# ifdef USE_MARK_BYTES |
|
hhdr -> hb_marks[i] = 1; |
|
# else |
hhdr -> hb_marks[i] = ONES; |
hhdr -> hb_marks[i] = ONES; |
|
# endif |
} |
} |
} |
} |
|
|
Line 147 register hdr * hhdr; |
|
Line 157 register hdr * hhdr; |
|
* Clear all mark bits associated with block h. |
* Clear all mark bits associated with block h. |
*/ |
*/ |
/*ARGSUSED*/ |
/*ARGSUSED*/ |
static void clear_marks_for_block(h, dummy) |
# if defined(__STDC__) || defined(__cplusplus) |
struct hblk *h; |
static void clear_marks_for_block(struct hblk *h, word dummy) |
word dummy; |
# else |
|
static void clear_marks_for_block(h, dummy) |
|
struct hblk *h; |
|
word dummy; |
|
# endif |
{ |
{ |
register hdr * hhdr = HDR(h); |
register hdr * hhdr = HDR(h); |
|
|
Line 227 void GC_initiate_gc() |
|
Line 241 void GC_initiate_gc() |
|
if (GC_dirty_maintained) GC_check_dirty(); |
if (GC_dirty_maintained) GC_check_dirty(); |
} |
} |
# endif |
# endif |
# ifdef GATHERSTATS |
GC_n_rescuing_pages = 0; |
GC_n_rescuing_pages = 0; |
|
# endif |
|
if (GC_mark_state == MS_NONE) { |
if (GC_mark_state == MS_NONE) { |
GC_mark_state = MS_PUSH_RESCUERS; |
GC_mark_state = MS_PUSH_RESCUERS; |
} else if (GC_mark_state != MS_INVALID) { |
} else if (GC_mark_state != MS_INVALID) { |
Line 252 static void alloc_mark_stack(); |
|
Line 264 static void alloc_mark_stack(); |
|
GC_bool GC_mark_some(cold_gc_frame) |
GC_bool GC_mark_some(cold_gc_frame) |
ptr_t cold_gc_frame; |
ptr_t cold_gc_frame; |
{ |
{ |
#ifdef MSWIN32 |
#if defined(MSWIN32) && !defined(__GNUC__) |
/* Windows 98 appears to asynchronously create and remove writable */ |
/* Windows 98 appears to asynchronously create and remove writable */ |
/* memory mappings, for reasons we haven't yet understood. Since */ |
/* memory mappings, for reasons we haven't yet understood. Since */ |
/* we look for writable regions to determine the root set, we may */ |
/* we look for writable regions to determine the root set, we may */ |
Line 262 ptr_t cold_gc_frame; |
|
Line 274 ptr_t cold_gc_frame; |
|
/* Note that this code should never generate an incremental GC write */ |
/* Note that this code should never generate an incremental GC write */ |
/* fault. */ |
/* fault. */ |
__try { |
__try { |
#endif |
#endif /* defined(MSWIN32) && !defined(__GNUC__) */ |
switch(GC_mark_state) { |
switch(GC_mark_state) { |
case MS_NONE: |
case MS_NONE: |
return(FALSE); |
return(FALSE); |
|
|
case MS_PUSH_RESCUERS: |
case MS_PUSH_RESCUERS: |
if (GC_mark_stack_top |
if (GC_mark_stack_top |
>= GC_mark_stack + GC_mark_stack_size |
>= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) { |
- INITIAL_MARK_STACK_SIZE/2) { |
|
/* Go ahead and mark, even though that might cause us to */ |
/* Go ahead and mark, even though that might cause us to */ |
/* see more marked dirty objects later on. Avoid this */ |
/* see more marked dirty objects later on. Avoid this */ |
/* in the future. */ |
/* in the future. */ |
GC_mark_stack_too_small = TRUE; |
GC_mark_stack_too_small = TRUE; |
GC_mark_from_mark_stack(); |
MARK_FROM_MARK_STACK(); |
return(FALSE); |
return(FALSE); |
} else { |
} else { |
scan_ptr = GC_push_next_marked_dirty(scan_ptr); |
scan_ptr = GC_push_next_marked_dirty(scan_ptr); |
if (scan_ptr == 0) { |
if (scan_ptr == 0) { |
# ifdef PRINTSTATS |
# ifdef CONDPRINT |
|
if (GC_print_stats) { |
GC_printf1("Marked from %lu dirty pages\n", |
GC_printf1("Marked from %lu dirty pages\n", |
(unsigned long)GC_n_rescuing_pages); |
(unsigned long)GC_n_rescuing_pages); |
|
} |
# endif |
# endif |
GC_push_roots(FALSE, cold_gc_frame); |
GC_push_roots(FALSE, cold_gc_frame); |
GC_objects_are_marked = TRUE; |
GC_objects_are_marked = TRUE; |
Line 295 ptr_t cold_gc_frame; |
|
Line 308 ptr_t cold_gc_frame; |
|
|
|
case MS_PUSH_UNCOLLECTABLE: |
case MS_PUSH_UNCOLLECTABLE: |
if (GC_mark_stack_top |
if (GC_mark_stack_top |
>= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) { |
>= GC_mark_stack + GC_mark_stack_size/4) { |
GC_mark_from_mark_stack(); |
# ifdef PARALLEL_MARK |
|
/* Avoid this, since we don't parallelize the marker */ |
|
/* here. */ |
|
if (GC_parallel) GC_mark_stack_too_small = TRUE; |
|
# endif |
|
MARK_FROM_MARK_STACK(); |
return(FALSE); |
return(FALSE); |
} else { |
} else { |
scan_ptr = GC_push_next_marked_uncollectable(scan_ptr); |
scan_ptr = GC_push_next_marked_uncollectable(scan_ptr); |
Line 311 ptr_t cold_gc_frame; |
|
Line 329 ptr_t cold_gc_frame; |
|
return(FALSE); |
return(FALSE); |
|
|
case MS_ROOTS_PUSHED: |
case MS_ROOTS_PUSHED: |
|
# ifdef PARALLEL_MARK |
|
/* In the incremental GC case, this currently doesn't */ |
|
/* quite do the right thing, since it runs to */ |
|
/* completion. On the other hand, starting a */ |
|
/* parallel marker is expensive, so perhaps it is */ |
|
/* the right thing? */ |
|
/* Eventually, incremental marking should run */ |
|
/* asynchronously in multiple threads, without grabbing */ |
|
/* the allocation lock. */ |
|
if (GC_parallel) { |
|
GC_do_parallel_mark(); |
|
GC_ASSERT(GC_mark_stack_top < GC_first_nonempty); |
|
GC_mark_stack_top = GC_mark_stack - 1; |
|
if (GC_mark_stack_too_small) { |
|
alloc_mark_stack(2*GC_mark_stack_size); |
|
} |
|
if (GC_mark_state == MS_ROOTS_PUSHED) { |
|
GC_mark_state = MS_NONE; |
|
return(TRUE); |
|
} else { |
|
return(FALSE); |
|
} |
|
} |
|
# endif |
if (GC_mark_stack_top >= GC_mark_stack) { |
if (GC_mark_stack_top >= GC_mark_stack) { |
GC_mark_from_mark_stack(); |
MARK_FROM_MARK_STACK(); |
return(FALSE); |
return(FALSE); |
} else { |
} else { |
GC_mark_state = MS_NONE; |
GC_mark_state = MS_NONE; |
Line 329 ptr_t cold_gc_frame; |
|
Line 371 ptr_t cold_gc_frame; |
|
return(FALSE); |
return(FALSE); |
} |
} |
if (GC_mark_stack_top >= GC_mark_stack) { |
if (GC_mark_stack_top >= GC_mark_stack) { |
GC_mark_from_mark_stack(); |
MARK_FROM_MARK_STACK(); |
return(FALSE); |
return(FALSE); |
} |
} |
if (scan_ptr == 0 && GC_mark_state == MS_INVALID) { |
if (scan_ptr == 0 && GC_mark_state == MS_INVALID) { |
Line 353 ptr_t cold_gc_frame; |
|
Line 395 ptr_t cold_gc_frame; |
|
ABORT("GC_mark_some: bad state"); |
ABORT("GC_mark_some: bad state"); |
return(FALSE); |
return(FALSE); |
} |
} |
#ifdef MSWIN32 |
#if defined(MSWIN32) && !defined(__GNUC__) |
} __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ? |
} __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ? |
EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { |
EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { |
# ifdef PRINTSTATS |
# ifdef CONDPRINT |
|
if (GC_print_stats) { |
GC_printf0("Caught ACCESS_VIOLATION in marker. " |
GC_printf0("Caught ACCESS_VIOLATION in marker. " |
"Memory mapping disappeared.\n"); |
"Memory mapping disappeared.\n"); |
# endif /* PRINTSTATS */ |
} |
|
# endif /* CONDPRINT */ |
/* We have bad roots on the stack. Discard mark stack. */ |
/* We have bad roots on the stack. Discard mark stack. */ |
/* Rescan from marked objects. Redetermine roots. */ |
/* Rescan from marked objects. Redetermine roots. */ |
GC_invalidate_mark_state(); |
GC_invalidate_mark_state(); |
scan_ptr = 0; |
scan_ptr = 0; |
return FALSE; |
return FALSE; |
} |
} |
#endif /* MSWIN32 */ |
#endif /* defined(MSWIN32) && !defined(__GNUC__) */ |
} |
} |
|
|
|
|
Line 383 GC_bool GC_mark_stack_empty() |
|
Line 427 GC_bool GC_mark_stack_empty() |
|
#endif |
#endif |
|
|
/* Given a pointer to someplace other than a small object page or the */ |
/* Given a pointer to someplace other than a small object page or the */ |
/* first page of a large object, return a pointer either to the */ |
/* first page of a large object, either: */ |
/* start of the large object or NIL. */ |
/* - return a pointer to somewhere in the first page of the large */ |
/* In the latter case black list the address current. */ |
/* object, if current points to a large object. */ |
/* Returns NIL without black listing if current points to a block */ |
/* In this case *hhdr is replaced with a pointer to the header */ |
/* with IGNORE_OFF_PAGE set. */ |
/* for the large object. */ |
|
/* - just return current if it does not point to a large object. */ |
/*ARGSUSED*/ |
/*ARGSUSED*/ |
# ifdef PRINT_BLACK_LIST |
ptr_t GC_find_start(current, hhdr, new_hdr_p) |
ptr_t GC_find_start(current, hhdr, source) |
|
word source; |
|
# else |
|
ptr_t GC_find_start(current, hhdr) |
|
# define source 0 |
|
# endif |
|
register ptr_t current; |
register ptr_t current; |
register hdr * hhdr; |
register hdr *hhdr, **new_hdr_p; |
{ |
{ |
# ifdef ALL_INTERIOR_POINTERS |
if (GC_all_interior_pointers) { |
if (hhdr != 0) { |
if (hhdr != 0) { |
register ptr_t orig = current; |
register ptr_t orig = current; |
|
|
current = (ptr_t)HBLKPTR(current) + HDR_BYTES; |
current = (ptr_t)HBLKPTR(current); |
do { |
do { |
current = current - HBLKSIZE*(word)hhdr; |
current = current - HBLKSIZE*(word)hhdr; |
hhdr = HDR(current); |
hhdr = HDR(current); |
Line 413 register hdr * hhdr; |
|
Line 452 register hdr * hhdr; |
|
if ((word *)orig - (word *)current |
if ((word *)orig - (word *)current |
>= (ptrdiff_t)(hhdr->hb_sz)) { |
>= (ptrdiff_t)(hhdr->hb_sz)) { |
/* Pointer past the end of the block */ |
/* Pointer past the end of the block */ |
GC_ADD_TO_BLACK_LIST_NORMAL(orig, source); |
return(orig); |
return(0); |
|
} |
} |
|
*new_hdr_p = hhdr; |
return(current); |
return(current); |
} else { |
} else { |
GC_ADD_TO_BLACK_LIST_NORMAL(current, source); |
return(current); |
return(0); |
|
} |
} |
# else |
} else { |
GC_ADD_TO_BLACK_LIST_NORMAL(current, source); |
return(current); |
return(0); |
} |
# endif |
|
# undef source |
|
} |
} |
|
|
void GC_invalidate_mark_state() |
void GC_invalidate_mark_state() |
|
|
{ |
{ |
GC_mark_state = MS_INVALID; |
GC_mark_state = MS_INVALID; |
GC_mark_stack_too_small = TRUE; |
GC_mark_stack_too_small = TRUE; |
# ifdef PRINTSTATS |
# ifdef CONDPRINT |
|
if (GC_print_stats) { |
GC_printf1("Mark stack overflow; current size = %lu entries\n", |
GC_printf1("Mark stack overflow; current size = %lu entries\n", |
GC_mark_stack_size); |
GC_mark_stack_size); |
# endif |
} |
return(msp-INITIAL_MARK_STACK_SIZE/8); |
# endif |
|
return(msp - GC_MARK_STACK_DISCARDS); |
} |
} |
|
|
|
|
/* |
/* |
* Mark objects pointed to by the regions described by |
* Mark objects pointed to by the regions described by |
* mark stack entries between GC_mark_stack and GC_mark_stack_top, |
* mark stack entries between GC_mark_stack and GC_mark_stack_top, |
|
|
* encoding, we optionally maintain a cache for the block address to |
* encoding, we optionally maintain a cache for the block address to |
* header mapping, we prefetch when an object is "grayed", etc. |
* header mapping, we prefetch when an object is "grayed", etc. |
*/ |
*/ |
void GC_mark_from_mark_stack() |
mse * GC_mark_from(mark_stack_top, mark_stack, mark_stack_limit) |
|
mse * mark_stack_top; |
|
mse * mark_stack; |
|
mse * mark_stack_limit; |
{ |
{ |
mse * GC_mark_stack_reg = GC_mark_stack; |
|
mse * GC_mark_stack_top_reg = GC_mark_stack_top; |
|
mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]); |
|
int credit = HBLKSIZE; /* Remaining credit for marking work */ |
int credit = HBLKSIZE; /* Remaining credit for marking work */ |
register word * current_p; /* Pointer to current candidate ptr. */ |
register word * current_p; /* Pointer to current candidate ptr. */ |
register word current; /* Candidate pointer. */ |
register word current; /* Candidate pointer. */ |
Line 481 void GC_mark_from_mark_stack() |
|
Line 518 void GC_mark_from_mark_stack() |
|
GC_objects_are_marked = TRUE; |
GC_objects_are_marked = TRUE; |
INIT_HDR_CACHE; |
INIT_HDR_CACHE; |
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */ |
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */ |
while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit >= 0) { |
while (mark_stack_top >= mark_stack && credit >= 0) { |
# else |
# else |
while ((((ptr_t)GC_mark_stack_top_reg - (ptr_t)GC_mark_stack_reg) | credit) |
while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit) |
>= 0) { |
>= 0) { |
# endif |
# endif |
current_p = GC_mark_stack_top_reg -> mse_start; |
current_p = mark_stack_top -> mse_start; |
descr = GC_mark_stack_top_reg -> mse_descr; |
descr = mark_stack_top -> mse_descr; |
retry: |
retry: |
/* current_p and descr describe the current object. */ |
/* current_p and descr describe the current object. */ |
/* *GC_mark_stack_top_reg is vacant. */ |
/* *mark_stack_top is vacant. */ |
/* The following is 0 only for small objects described by a simple */ |
/* The following is 0 only for small objects described by a simple */ |
/* length descriptor. For many applications this is the common */ |
/* length descriptor. For many applications this is the common */ |
/* case, so we try to detect it quickly. */ |
/* case, so we try to detect it quickly. */ |
if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | DS_TAGS)) { |
if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) { |
word tag = descr & DS_TAGS; |
word tag = descr & GC_DS_TAGS; |
|
|
switch(tag) { |
switch(tag) { |
case DS_LENGTH: |
case GC_DS_LENGTH: |
/* Large length. */ |
/* Large length. */ |
/* Process part of the range to avoid pushing too much on the */ |
/* Process part of the range to avoid pushing too much on the */ |
/* stack. */ |
/* stack. */ |
GC_mark_stack_top_reg -> mse_start = |
GC_ASSERT(descr < GC_greatest_plausible_heap_addr |
|
- GC_least_plausible_heap_addr); |
|
# ifdef PARALLEL_MARK |
|
# define SHARE_BYTES 2048 |
|
if (descr > SHARE_BYTES && GC_parallel |
|
&& mark_stack_top < mark_stack_limit - 1) { |
|
int new_size = (descr/2) & ~(sizeof(word)-1); |
|
mark_stack_top -> mse_start = current_p; |
|
mark_stack_top -> mse_descr = new_size + sizeof(word); |
|
/* makes sure we handle */ |
|
/* misaligned pointers. */ |
|
mark_stack_top++; |
|
current_p = (word *) ((char *)current_p + new_size); |
|
descr -= new_size; |
|
goto retry; |
|
} |
|
# endif /* PARALLEL_MARK */ |
|
mark_stack_top -> mse_start = |
limit = current_p + SPLIT_RANGE_WORDS-1; |
limit = current_p + SPLIT_RANGE_WORDS-1; |
GC_mark_stack_top_reg -> mse_descr = |
mark_stack_top -> mse_descr = |
descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1); |
descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1); |
/* Make sure that pointers overlapping the two ranges are */ |
/* Make sure that pointers overlapping the two ranges are */ |
/* considered. */ |
/* considered. */ |
limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT); |
limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT); |
break; |
break; |
case DS_BITMAP: |
case GC_DS_BITMAP: |
GC_mark_stack_top_reg--; |
mark_stack_top--; |
descr &= ~DS_TAGS; |
descr &= ~GC_DS_TAGS; |
credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */ |
credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */ |
while (descr != 0) { |
while (descr != 0) { |
if ((signed_word)descr < 0) { |
if ((signed_word)descr < 0) { |
current = *current_p; |
current = *current_p; |
if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) { |
if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) { |
PREFETCH(current); |
PREFETCH(current); |
HC_PUSH_CONTENTS((ptr_t)current, GC_mark_stack_top_reg, |
HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top, |
mark_stack_limit, current_p, exit1); |
mark_stack_limit, current_p, exit1); |
} |
} |
} |
} |
Line 527 void GC_mark_from_mark_stack() |
|
Line 581 void GC_mark_from_mark_stack() |
|
++ current_p; |
++ current_p; |
} |
} |
continue; |
continue; |
case DS_PROC: |
case GC_DS_PROC: |
GC_mark_stack_top_reg--; |
mark_stack_top--; |
credit -= PROC_BYTES; |
credit -= GC_PROC_BYTES; |
GC_mark_stack_top_reg = |
mark_stack_top = |
(*PROC(descr)) |
(*PROC(descr)) |
(current_p, GC_mark_stack_top_reg, |
(current_p, mark_stack_top, |
mark_stack_limit, ENV(descr)); |
mark_stack_limit, ENV(descr)); |
continue; |
continue; |
case DS_PER_OBJECT: |
case GC_DS_PER_OBJECT: |
if ((signed_word)descr >= 0) { |
if ((signed_word)descr >= 0) { |
/* Descriptor is in the object. */ |
/* Descriptor is in the object. */ |
descr = *(word *)((ptr_t)current_p + descr - DS_PER_OBJECT); |
descr = *(word *)((ptr_t)current_p + descr - GC_DS_PER_OBJECT); |
} else { |
} else { |
/* Descriptor is in type descriptor pointed to by first */ |
/* Descriptor is in type descriptor pointed to by first */ |
/* word in object. */ |
/* word in object. */ |
Line 552 void GC_mark_from_mark_stack() |
|
Line 606 void GC_mark_from_mark_stack() |
|
/* object case explicitly. */ |
/* object case explicitly. */ |
if (0 == type_descr) { |
if (0 == type_descr) { |
/* Rarely executed. */ |
/* Rarely executed. */ |
GC_mark_stack_top_reg--; |
mark_stack_top--; |
continue; |
continue; |
} |
} |
descr = *(word *)(type_descr |
descr = *(word *)(type_descr |
- (descr - (DS_PER_OBJECT - INDIR_PER_OBJ_BIAS))); |
- (descr - (GC_DS_PER_OBJECT |
|
- GC_INDIR_PER_OBJ_BIAS))); |
} |
} |
if (0 == descr) { |
if (0 == descr) { |
GC_mark_stack_top_reg--; |
/* Can happen either because we generated a 0 descriptor */ |
continue; |
/* or we saw a pointer to a free object. */ |
|
mark_stack_top--; |
|
continue; |
} |
} |
goto retry; |
goto retry; |
} |
} |
} else /* Small object with length descriptor */ { |
} else /* Small object with length descriptor */ { |
GC_mark_stack_top_reg--; |
mark_stack_top--; |
limit = (word *)(((ptr_t)current_p) + (word)descr); |
limit = (word *)(((ptr_t)current_p) + (word)descr); |
} |
} |
/* The simple case in which we're scanning a range. */ |
/* The simple case in which we're scanning a range. */ |
|
GC_ASSERT(!((word)current_p & (ALIGNMENT-1))); |
credit -= (ptr_t)limit - (ptr_t)current_p; |
credit -= (ptr_t)limit - (ptr_t)current_p; |
limit -= 1; |
limit -= 1; |
{ |
{ |
Line 585 void GC_mark_from_mark_stack() |
|
Line 643 void GC_mark_from_mark_stack() |
|
/* for this loop is still not great. */ |
/* for this loop is still not great. */ |
for(;;) { |
for(;;) { |
PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE); |
PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE); |
|
GC_ASSERT(limit >= current_p); |
deferred = *limit; |
deferred = *limit; |
limit = (word *)((char *)limit - ALIGNMENT); |
limit = (word *)((char *)limit - ALIGNMENT); |
if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) { |
if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) { |
Line 614 void GC_mark_from_mark_stack() |
|
Line 673 void GC_mark_from_mark_stack() |
|
/* Prefetch the contents of the object we just pushed. It's */ |
/* Prefetch the contents of the object we just pushed. It's */ |
/* likely we will need them soon. */ |
/* likely we will need them soon. */ |
PREFETCH(current); |
PREFETCH(current); |
HC_PUSH_CONTENTS((ptr_t)current, GC_mark_stack_top_reg, |
HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top, |
mark_stack_limit, current_p, exit2); |
mark_stack_limit, current_p, exit2); |
} |
} |
current_p = (word *)((char *)current_p + ALIGNMENT); |
current_p = (word *)((char *)current_p + ALIGNMENT); |
Line 624 void GC_mark_from_mark_stack() |
|
Line 683 void GC_mark_from_mark_stack() |
|
/* We still need to mark the entry we previously prefetched. */ |
/* We still need to mark the entry we previously prefetched. */ |
/* We alrady know that it passes the preliminary pointer */ |
/* We alrady know that it passes the preliminary pointer */ |
/* validity test. */ |
/* validity test. */ |
HC_PUSH_CONTENTS((ptr_t)deferred, GC_mark_stack_top_reg, |
HC_PUSH_CONTENTS((ptr_t)deferred, mark_stack_top, |
mark_stack_limit, current_p, exit4); |
mark_stack_limit, current_p, exit4); |
next_object:; |
next_object:; |
# endif |
# endif |
} |
} |
} |
} |
GC_mark_stack_top = GC_mark_stack_top_reg; |
return mark_stack_top; |
} |
} |
|
|
|
#ifdef PARALLEL_MARK |
|
|
|
/* We assume we have an ANSI C Compiler. */ |
|
GC_bool GC_help_wanted = FALSE; |
|
unsigned GC_helper_count = 0; |
|
unsigned GC_active_count = 0; |
|
mse * VOLATILE GC_first_nonempty; |
|
word GC_mark_no = 0; |
|
|
|
#define LOCAL_MARK_STACK_SIZE HBLKSIZE |
|
/* Under normal circumstances, this is big enough to guarantee */ |
|
/* We don't overflow half of it in a single call to */ |
|
/* GC_mark_from. */ |
|
|
|
|
|
/* Steal mark stack entries starting at mse low into mark stack local */ |
|
/* until we either steal mse high, or we have max entries. */ |
|
/* Return a pointer to the top of the local mark stack. */ |
|
/* *next is replaced by a pointer to the next unscanned mark stack */ |
|
/* entry. */ |
|
mse * GC_steal_mark_stack(mse * low, mse * high, mse * local, |
|
unsigned max, mse **next) |
|
{ |
|
mse *p; |
|
mse *top = local - 1; |
|
unsigned i = 0; |
|
|
|
GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size); |
|
for (p = low; p <= high && i <= max; ++p) { |
|
word descr = *(volatile word *) &(p -> mse_descr); |
|
if (descr != 0) { |
|
*(volatile word *) &(p -> mse_descr) = 0; |
|
++top; |
|
top -> mse_descr = descr; |
|
top -> mse_start = p -> mse_start; |
|
GC_ASSERT( top -> mse_descr & GC_DS_TAGS != GC_DS_LENGTH || |
|
top -> mse_descr < GC_greatest_plausible_heap_addr |
|
- GC_least_plausible_heap_addr); |
|
/* There is no synchronization here. We assume that at */ |
|
/* least one thread will see the original descriptor. */ |
|
/* Otherwise we need a barrier. */ |
|
/* More than one thread may get this entry, but that's only */ |
|
/* a minor performance problem. */ |
|
/* If this is a big object, count it as */ |
|
/* size/256 + 1 objects. */ |
|
++i; |
|
if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (descr >> 8); |
|
} |
|
} |
|
*next = p; |
|
return top; |
|
} |
|
|
|
/* Copy back a local mark stack. */ |
|
/* low and high are inclusive bounds. */ |
|
void GC_return_mark_stack(mse * low, mse * high) |
|
{ |
|
mse * my_top; |
|
mse * my_start; |
|
size_t stack_size; |
|
|
|
if (high < low) return; |
|
stack_size = high - low + 1; |
|
GC_acquire_mark_lock(); |
|
my_top = GC_mark_stack_top; |
|
my_start = my_top + 1; |
|
if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) { |
|
# ifdef CONDPRINT |
|
if (GC_print_stats) { |
|
GC_printf0("No room to copy back mark stack."); |
|
} |
|
# endif |
|
GC_mark_state = MS_INVALID; |
|
GC_mark_stack_too_small = TRUE; |
|
/* We drop the local mark stack. We'll fix things later. */ |
|
} else { |
|
BCOPY(low, my_start, stack_size * sizeof(mse)); |
|
GC_ASSERT(GC_mark_stack_top = my_top); |
|
# if !defined(IA64) && !defined(HP_PA) |
|
GC_memory_write_barrier(); |
|
# endif |
|
/* On IA64, the volatile write acts as a release barrier. */ |
|
GC_mark_stack_top = my_top + stack_size; |
|
} |
|
GC_release_mark_lock(); |
|
GC_notify_all_marker(); |
|
} |
|
|
|
/* Mark from the local mark stack. */ |
|
/* On return, the local mark stack is empty. */ |
|
/* But this may be achieved by copying the */ |
|
/* local mark stack back into the global one. */ |
|
void GC_do_local_mark(mse *local_mark_stack, mse *local_top) |
|
{ |
|
unsigned n; |
|
# define N_LOCAL_ITERS 1 |
|
|
|
# ifdef GC_ASSERTIONS |
|
/* Make sure we don't hold mark lock. */ |
|
GC_acquire_mark_lock(); |
|
GC_release_mark_lock(); |
|
# endif |
|
for (;;) { |
|
for (n = 0; n < N_LOCAL_ITERS; ++n) { |
|
local_top = GC_mark_from(local_top, local_mark_stack, |
|
local_mark_stack + LOCAL_MARK_STACK_SIZE); |
|
if (local_top < local_mark_stack) return; |
|
if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) { |
|
GC_return_mark_stack(local_mark_stack, local_top); |
|
return; |
|
} |
|
} |
|
if (GC_mark_stack_top < GC_first_nonempty && |
|
GC_active_count < GC_helper_count |
|
&& local_top > local_mark_stack + 1) { |
|
/* Try to share the load, since the main stack is empty, */ |
|
/* and helper threads are waiting for a refill. */ |
|
/* The entries near the bottom of the stack are likely */ |
|
/* to require more work. Thus we return those, eventhough */ |
|
/* it's harder. */ |
|
mse * p; |
|
mse * new_bottom = local_mark_stack |
|
+ (local_top - local_mark_stack)/2; |
|
GC_ASSERT(new_bottom > local_mark_stack |
|
&& new_bottom < local_top); |
|
GC_return_mark_stack(local_mark_stack, new_bottom - 1); |
|
memmove(local_mark_stack, new_bottom, |
|
(local_top - new_bottom + 1) * sizeof(mse)); |
|
local_top -= (new_bottom - local_mark_stack); |
|
} |
|
} |
|
} |
|
|
|
#define ENTRIES_TO_GET 5 |
|
|
|
long GC_markers = 2; /* Normally changed by thread-library- */ |
|
/* -specific code. */ |
|
|
|
/* Mark using the local mark stack until the global mark stack is empty */ |
|
/* and there are no active workers. Update GC_first_nonempty to reflect */ |
|
/* progress. */ |
|
/* Caller does not hold mark lock. */ |
|
/* Caller has already incremented GC_helper_count. We decrement it, */ |
|
/* and maintain GC_active_count. */ |
|
void GC_mark_local(mse *local_mark_stack, int id) |
|
{ |
|
mse * my_first_nonempty; |
|
|
|
GC_acquire_mark_lock(); |
|
GC_active_count++; |
|
my_first_nonempty = GC_first_nonempty; |
|
GC_ASSERT(GC_first_nonempty >= GC_mark_stack && |
|
GC_first_nonempty <= GC_mark_stack_top + 1); |
|
# ifdef PRINTSTATS |
|
GC_printf1("Starting mark helper %lu\n", (unsigned long)id); |
|
# endif |
|
GC_release_mark_lock(); |
|
for (;;) { |
|
size_t n_on_stack; |
|
size_t n_to_get; |
|
mse *next; |
|
mse * my_top; |
|
mse * local_top; |
|
mse * global_first_nonempty = GC_first_nonempty; |
|
|
|
GC_ASSERT(my_first_nonempty >= GC_mark_stack && |
|
my_first_nonempty <= GC_mark_stack_top + 1); |
|
GC_ASSERT(global_first_nonempty >= GC_mark_stack && |
|
global_first_nonempty <= GC_mark_stack_top + 1); |
|
if (my_first_nonempty < global_first_nonempty) { |
|
my_first_nonempty = global_first_nonempty; |
|
} else if (global_first_nonempty < my_first_nonempty) { |
|
GC_compare_and_exchange((word *)(&GC_first_nonempty), |
|
(word) global_first_nonempty, |
|
(word) my_first_nonempty); |
|
/* If this fails, we just go ahead, without updating */ |
|
/* GC_first_nonempty. */ |
|
} |
|
/* Perhaps we should also update GC_first_nonempty, if it */ |
|
/* is less. But that would require using atomic updates. */ |
|
my_top = GC_mark_stack_top; |
|
n_on_stack = my_top - my_first_nonempty + 1; |
|
if (0 == n_on_stack) { |
|
GC_acquire_mark_lock(); |
|
my_top = GC_mark_stack_top; |
|
n_on_stack = my_top - my_first_nonempty + 1; |
|
if (0 == n_on_stack) { |
|
GC_active_count--; |
|
GC_ASSERT(GC_active_count <= GC_helper_count); |
|
/* Other markers may redeposit objects */ |
|
/* on the stack. */ |
|
if (0 == GC_active_count) GC_notify_all_marker(); |
|
while (GC_active_count > 0 |
|
&& GC_first_nonempty > GC_mark_stack_top) { |
|
/* We will be notified if either GC_active_count */ |
|
/* reaches zero, or if more objects are pushed on */ |
|
/* the global mark stack. */ |
|
GC_wait_marker(); |
|
} |
|
if (GC_active_count == 0 && |
|
GC_first_nonempty > GC_mark_stack_top) { |
|
GC_bool need_to_notify = FALSE; |
|
/* The above conditions can't be falsified while we */ |
|
/* hold the mark lock, since neither */ |
|
/* GC_active_count nor GC_mark_stack_top can */ |
|
/* change. GC_first_nonempty can only be */ |
|
/* incremented asynchronously. Thus we know that */ |
|
/* both conditions actually held simultaneously. */ |
|
GC_helper_count--; |
|
if (0 == GC_helper_count) need_to_notify = TRUE; |
|
# ifdef PRINTSTATS |
|
GC_printf1( |
|
"Finished mark helper %lu\n", (unsigned long)id); |
|
# endif |
|
GC_release_mark_lock(); |
|
if (need_to_notify) GC_notify_all_marker(); |
|
return; |
|
} |
|
/* else there's something on the stack again, or */ |
|
/* another helper may push something. */ |
|
GC_active_count++; |
|
GC_ASSERT(GC_active_count > 0); |
|
GC_release_mark_lock(); |
|
continue; |
|
} else { |
|
GC_release_mark_lock(); |
|
} |
|
} |
|
n_to_get = ENTRIES_TO_GET; |
|
if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1; |
|
local_top = GC_steal_mark_stack(my_first_nonempty, my_top, |
|
local_mark_stack, n_to_get, |
|
&my_first_nonempty); |
|
GC_ASSERT(my_first_nonempty >= GC_mark_stack && |
|
my_first_nonempty <= GC_mark_stack_top + 1); |
|
GC_do_local_mark(local_mark_stack, local_top); |
|
} |
|
} |
|
|
|
/* Perform Parallel mark.					*/
/* We hold the GC lock, not the mark lock.			*/
/* Currently runs until the mark stack is empty.		*/
/* Protocol: this (initiating) thread counts itself as helper	*/
/* number 0 (GC_helper_count = 1) and participates via		*/
/* GC_mark_local; other threads join through GC_help_marker	*/
/* while GC_help_wanted is set.					*/
void GC_do_parallel_mark()
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
	/* Per-thread mark stack; entries are stolen from the	*/
	/* global stack and processed locally by GC_mark_local.	*/
    mse * local_top;
    mse * my_top;

    GC_acquire_mark_lock();
    GC_ASSERT(I_HOLD_LOCK());
    /* This could be a GC_ASSERT, but it seems safer to keep it on	*/
    /* all the time, especially since it's cheap.			*/
    if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
	ABORT("Tried to start parallel mark in bad state");
#   ifdef PRINTSTATS
	GC_printf1("Starting marking for mark phase number %lu\n",
		   (unsigned long)GC_mark_no);
#   endif
    /* Initialize the shared marker state for this phase.	*/
    GC_first_nonempty = GC_mark_stack;
    GC_active_count = 0;
    GC_helper_count = 1;	/* This thread is helper number 0. */
    GC_help_wanted = TRUE;
    GC_release_mark_lock();
    GC_notify_all_marker();
	/* Wake up potential helpers blocked in GC_help_marker.	*/
    GC_mark_local(local_mark_stack, 0);
    GC_acquire_mark_lock();
    GC_help_wanted = FALSE;
    /* Done; clean up.  Wait for all helpers to finish draining	*/
    /* their local stacks before advancing the phase number.	*/
    while (GC_helper_count > 0) GC_wait_marker();
    /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
#   ifdef PRINTSTATS
      GC_printf1(
	"Finished marking for mark phase number %lu\n",
	(unsigned long)GC_mark_no);
#   endif
    GC_mark_no++;
    GC_release_mark_lock();
    GC_notify_all_marker();
	/* Wake waiters in GC_help_marker so they observe the	*/
	/* phase-number change and return.			*/
}
|
|
|
|
|
/* Try to help out the marker, if it's running. */ |
|
/* We do not hold the GC lock, but the requestor does. */ |
|
void GC_help_marker(word my_mark_no) |
|
{ |
|
mse local_mark_stack[LOCAL_MARK_STACK_SIZE]; |
|
unsigned my_id; |
|
mse * my_first_nonempty; |
|
|
|
if (!GC_parallel) return; |
|
GC_acquire_mark_lock(); |
|
while (GC_mark_no < my_mark_no |
|
|| !GC_help_wanted && GC_mark_no == my_mark_no) { |
|
GC_wait_marker(); |
|
} |
|
my_id = GC_helper_count; |
|
if (GC_mark_no != my_mark_no || my_id >= GC_markers) { |
|
/* Second test is useful only if original threads can also */ |
|
/* act as helpers. Under Linux they can't. */ |
|
GC_release_mark_lock(); |
|
return; |
|
} |
|
GC_helper_count = my_id + 1; |
|
GC_release_mark_lock(); |
|
GC_mark_local(local_mark_stack, my_id); |
|
/* GC_mark_local decrements GC_helper_count. */ |
|
} |
|
|
|
#endif /* PARALLEL_MARK */ |
|
|
/* Allocate or reallocate space for mark stack of size s words */ |
/* Allocate or reallocate space for mark stack of size s words */ |
/* May silently fail. */ |
/* May silently fail. */ |
static void alloc_mark_stack(n) |
static void alloc_mark_stack(n) |
word n; |
word n; |
{ |
{ |
mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry)); |
mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry)); |
|
|
GC_mark_stack_too_small = FALSE; |
GC_mark_stack_too_small = FALSE; |
if (GC_mark_stack_size != 0) { |
if (GC_mark_stack_size != 0) { |
if (new_stack != 0) { |
if (new_stack != 0) { |
word displ = (word)GC_mark_stack & (GC_page_size - 1); |
word displ = (word)GC_mark_stack & (GC_page_size - 1); |
signed_word size = GC_mark_stack_size * sizeof(struct ms_entry); |
signed_word size = GC_mark_stack_size * sizeof(struct GC_ms_entry); |
|
|
/* Recycle old space */ |
/* Recycle old space */ |
if (0 != displ) displ = GC_page_size - displ; |
if (0 != displ) displ = GC_page_size - displ; |
|
|
} |
} |
GC_mark_stack = new_stack; |
GC_mark_stack = new_stack; |
GC_mark_stack_size = n; |
GC_mark_stack_size = n; |
# ifdef PRINTSTATS |
GC_mark_stack_limit = new_stack + n; |
|
# ifdef CONDPRINT |
|
if (GC_print_stats) { |
GC_printf1("Grew mark stack to %lu frames\n", |
GC_printf1("Grew mark stack to %lu frames\n", |
(unsigned long) GC_mark_stack_size); |
(unsigned long) GC_mark_stack_size); |
|
} |
# endif |
# endif |
} else { |
} else { |
# ifdef PRINTSTATS |
# ifdef CONDPRINT |
|
if (GC_print_stats) { |
GC_printf1("Failed to grow mark stack to %lu frames\n", |
GC_printf1("Failed to grow mark stack to %lu frames\n", |
(unsigned long) n); |
(unsigned long) n); |
|
} |
# endif |
# endif |
} |
} |
} else { |
} else { |
|
|
} |
} |
GC_mark_stack = new_stack; |
GC_mark_stack = new_stack; |
GC_mark_stack_size = n; |
GC_mark_stack_size = n; |
|
GC_mark_stack_limit = new_stack + n; |
} |
} |
GC_mark_stack_top = GC_mark_stack-1; |
GC_mark_stack_top = GC_mark_stack-1; |
} |
} |
|
|
top = (ptr_t)(((word) top) & ~(ALIGNMENT-1)); |
top = (ptr_t)(((word) top) & ~(ALIGNMENT-1)); |
if (top == 0 || bottom == top) return; |
if (top == 0 || bottom == top) return; |
GC_mark_stack_top++; |
GC_mark_stack_top++; |
if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) { |
if (GC_mark_stack_top >= GC_mark_stack_limit) { |
ABORT("unexpected mark stack overflow"); |
ABORT("unexpected mark stack overflow"); |
} |
} |
length = top - bottom; |
length = top - bottom; |
# if DS_TAGS > ALIGNMENT - 1 |
# if GC_DS_TAGS > ALIGNMENT - 1 |
length += DS_TAGS; |
length += GC_DS_TAGS; |
length &= ~DS_TAGS; |
length &= ~GC_DS_TAGS; |
# endif |
# endif |
GC_mark_stack_top -> mse_start = (word *)bottom; |
GC_mark_stack_top -> mse_start = (word *)bottom; |
GC_mark_stack_top -> mse_descr = length; |
GC_mark_stack_top -> mse_descr = length; |
} |
} |
|
|
/* |
/* |
* Analogous to the above, but push only those pages that may have been |
* Analogous to the above, but push only those pages h with dirty_fn(h) != 0. |
* dirtied. A block h is assumed dirty if dirty_fn(h) != 0. |
|
* We use push_fn to actually push the block. |
* We use push_fn to actually push the block. |
|
* Used both to selectively push dirty pages, or to push a block |
|
* in piecemeal fashion, to allow for more marking concurrency. |
* Will not overflow mark stack if push_fn pushes a small fixed number |
* Will not overflow mark stack if push_fn pushes a small fixed number |
* of entries. (This is invoked only if push_fn pushes a single entry, |
* of entries. (This is invoked only if push_fn pushes a single entry, |
* or if it marks each object before pushing it, thus ensuring progress |
* or if it marks each object before pushing it, thus ensuring progress |
* in the event of a stack overflow.) |
* in the event of a stack overflow.) |
*/ |
*/ |
void GC_push_dirty(bottom, top, dirty_fn, push_fn) |
void GC_push_selected(bottom, top, dirty_fn, push_fn) |
ptr_t bottom; |
ptr_t bottom; |
ptr_t top; |
ptr_t top; |
int (*dirty_fn)(/* struct hblk * h */); |
int (*dirty_fn) GC_PROTO((struct hblk * h)); |
void (*push_fn)(/* ptr_t bottom, ptr_t top */); |
void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top)); |
{ |
{ |
register struct hblk * h; |
register struct hblk * h; |
|
|
Line 759 void (*push_fn)(/* ptr_t bottom, ptr_t top */); |
|
Line 1137 void (*push_fn)(/* ptr_t bottom, ptr_t top */); |
|
(*push_fn)((ptr_t)h, top); |
(*push_fn)((ptr_t)h, top); |
} |
} |
} |
} |
if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) { |
if (GC_mark_stack_top >= GC_mark_stack_limit) { |
ABORT("unexpected mark stack overflow"); |
ABORT("unexpected mark stack overflow"); |
} |
} |
} |
} |
|
|
# ifndef SMALL_CONFIG |
# ifndef SMALL_CONFIG |
|
|
|
#ifdef PARALLEL_MARK
    /* Break up root sections into page size chunks to better spread 	*/
    /* out work.							*/
    /* GC_true_func treats every block as selected, so GC_PUSH_ALL	*/
    /* can reuse GC_push_selected's block-by-block traversal purely	*/
    /* for its chunking effect.						*/
    GC_bool GC_true_func(struct hblk *h) { return TRUE; }
#   define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
#else
    /* Without parallel marking there is no benefit to chunking;	*/
    /* push the whole range in one mark stack entry.			*/
#   define GC_PUSH_ALL(b,t) GC_push_all(b,t);
#endif
|
|
|
|
void GC_push_conditional(bottom, top, all) |
void GC_push_conditional(bottom, top, all) |
ptr_t bottom; |
ptr_t bottom; |
ptr_t top; |
ptr_t top; |
|
|
if (GC_dirty_maintained) { |
if (GC_dirty_maintained) { |
# ifdef PROC_VDB |
# ifdef PROC_VDB |
/* Pages that were never dirtied cannot contain pointers */ |
/* Pages that were never dirtied cannot contain pointers */ |
GC_push_dirty(bottom, top, GC_page_was_ever_dirty, GC_push_all); |
GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all); |
# else |
# else |
GC_push_all(bottom, top); |
GC_push_all(bottom, top); |
# endif |
# endif |
|
|
GC_push_all(bottom, top); |
GC_push_all(bottom, top); |
} |
} |
} else { |
} else { |
GC_push_dirty(bottom, top, GC_page_was_dirty, GC_push_all); |
GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all); |
} |
} |
} |
} |
#endif |
#endif |
|
|
# ifdef MSWIN32 |
# if defined(MSWIN32) || defined(MSWINCE) |
void __cdecl GC_push_one(p) |
void __cdecl GC_push_one(p) |
# else |
# else |
void GC_push_one(p) |
void GC_push_one(p) |
# endif |
# endif |
word p; |
word p; |
{ |
{ |
# ifdef NURSERY |
|
if (0 != GC_push_proc) { |
|
GC_push_proc(p); |
|
return; |
|
} |
|
# endif |
|
GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER); |
GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER); |
} |
} |
|
|
|
/* Mark the object obj and, if it was not already marked, push it	*/
/* (gray it) via the PUSH_CONTENTS macro.  mark_stack_ptr is		*/
/* advanced as entries are pushed and the updated value is		*/
/* returned; mark_stack_limit bounds the push.				*/
/* NOTE(review): src is presumably the address of the field that	*/
/* held obj, used by PUSH_CONTENTS for back-pointer/blacklist		*/
/* bookkeeping -- confirm against the macro in private/gc_pmark.h.	*/
struct GC_ms_entry *GC_mark_and_push(obj, mark_stack_ptr, mark_stack_limit, src)
GC_PTR obj;
struct GC_ms_entry * mark_stack_ptr;
struct GC_ms_entry * mark_stack_limit;
GC_PTR *src;
{
   PREFETCH(obj);	/* start fetching *obj before the macro examines it */
   PUSH_CONTENTS(obj, mark_stack_ptr /* modified */, mark_stack_limit, src,
		 was_marked /* internally generated exit label */);
   return mark_stack_ptr;
}
|
|
# ifdef __STDC__ |
# ifdef __STDC__ |
# define BASE(p) (word)GC_base((void *)(p)) |
# define BASE(p) (word)GC_base((void *)(p)) |
# else |
# else |
# define BASE(p) (word)GC_base((char *)(p)) |
# define BASE(p) (word)GC_base((char *)(p)) |
# endif |
# endif |
|
|
/* As above, but argument passed preliminary test. */ |
/* Mark and push (i.e. gray) a single object p onto the main */ |
|
/* mark stack. Consider p to be valid if it is an interior */ |
|
/* pointer. */ |
|
/* The object p has passed a preliminary pointer validity */ |
|
/* test, but we do not definitely know whether it is valid. */ |
|
/* Mark bits are NOT atomically updated. Thus this must be the */ |
|
/* only thread setting them. */ |
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS) |
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS) |
void GC_push_one_checked(p, interior_ptrs, source) |
void GC_mark_and_push_stack(p, source) |
ptr_t source; |
ptr_t source; |
# else |
# else |
void GC_push_one_checked(p, interior_ptrs) |
void GC_mark_and_push_stack(p) |
# define source 0 |
# define source 0 |
# endif |
# endif |
register word p; |
register word p; |
register GC_bool interior_ptrs; |
|
{ |
{ |
register word r; |
register word r; |
register hdr * hhdr; |
register hdr * hhdr; |
Line 826 register GC_bool interior_ptrs; |
|
Line 1226 register GC_bool interior_ptrs; |
|
|
|
GET_HDR(p, hhdr); |
GET_HDR(p, hhdr); |
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { |
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { |
if (hhdr != 0 && interior_ptrs) { |
if (hhdr != 0) { |
r = BASE(p); |
r = BASE(p); |
hhdr = HDR(r); |
hhdr = HDR(r); |
displ = BYTES_TO_WORDS(HBLKDISPL(r)); |
displ = BYTES_TO_WORDS(HBLKDISPL(r)); |
} else { |
|
hhdr = 0; |
|
} |
} |
} else { |
} else { |
register map_entry_type map_entry; |
register map_entry_type map_entry; |
|
|
displ = HBLKDISPL(p); |
displ = HBLKDISPL(p); |
map_entry = MAP_ENTRY((hhdr -> hb_map), displ); |
map_entry = MAP_ENTRY((hhdr -> hb_map), displ); |
if (map_entry == OBJ_INVALID) { |
if (map_entry >= MAX_OFFSET) { |
# ifndef ALL_INTERIOR_POINTERS |
if (map_entry == OFFSET_TOO_BIG || !GC_all_interior_pointers) { |
if (interior_ptrs) { |
|
r = BASE(p); |
r = BASE(p); |
displ = BYTES_TO_WORDS(HBLKDISPL(r)); |
displ = BYTES_TO_WORDS(HBLKDISPL(r)); |
if (r == 0) hhdr = 0; |
if (r == 0) hhdr = 0; |
} else { |
} else { |
|
/* Offset invalid, but map reflects interior pointers */ |
hhdr = 0; |
hhdr = 0; |
} |
} |
# else |
|
/* map already reflects interior pointers */ |
|
hhdr = 0; |
|
# endif |
|
} else { |
} else { |
displ = BYTES_TO_WORDS(displ); |
displ = BYTES_TO_WORDS(displ); |
displ -= map_entry; |
displ -= map_entry; |
Line 860 register GC_bool interior_ptrs; |
|
Line 1254 register GC_bool interior_ptrs; |
|
/* If hhdr != 0 then r == GC_base(p), only we did it faster. */ |
/* If hhdr != 0 then r == GC_base(p), only we did it faster. */ |
/* displ is the word index within the block. */ |
/* displ is the word index within the block. */ |
if (hhdr == 0) { |
if (hhdr == 0) { |
if (interior_ptrs) { |
# ifdef PRINT_BLACK_LIST |
# ifdef PRINT_BLACK_LIST |
GC_add_to_black_list_stack(p, source); |
GC_add_to_black_list_stack(p, source); |
# else |
# else |
GC_add_to_black_list_stack(p); |
GC_add_to_black_list_stack(p); |
# endif |
# endif |
# undef source /* In case we had to define it. */ |
} else { |
|
GC_ADD_TO_BLACK_LIST_NORMAL(p, source); |
|
# undef source /* In case we had to define it. */ |
|
} |
|
} else { |
} else { |
if (!mark_bit_from_hdr(hhdr, displ)) { |
if (!mark_bit_from_hdr(hhdr, displ)) { |
set_mark_bit_from_hdr(hhdr, displ); |
set_mark_bit_from_hdr(hhdr, displ); |
GC_STORE_BACK_PTR(source, (ptr_t)r); |
GC_STORE_BACK_PTR(source, (ptr_t)r); |
PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top, |
PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top, |
&(GC_mark_stack[GC_mark_stack_size])); |
GC_mark_stack_limit); |
} |
} |
} |
} |
} |
} |
|
|
ptr_t top; |
ptr_t top; |
ptr_t cold_gc_frame; |
ptr_t cold_gc_frame; |
{ |
{ |
# ifdef ALL_INTERIOR_POINTERS |
if (GC_all_interior_pointers) { |
# define EAGER_BYTES 1024 |
# define EAGER_BYTES 1024 |
/* Push the hot end of the stack eagerly, so that register values */ |
/* Push the hot end of the stack eagerly, so that register values */ |
/* saved inside GC frames are marked before they disappear. */ |
/* saved inside GC frames are marked before they disappear. */ |
Line 979 ptr_t cold_gc_frame; |
|
Line 1369 ptr_t cold_gc_frame; |
|
return; |
return; |
} |
} |
# ifdef STACK_GROWS_DOWN |
# ifdef STACK_GROWS_DOWN |
GC_push_all_eager(bottom, cold_gc_frame); |
|
GC_push_all(cold_gc_frame - sizeof(ptr_t), top); |
GC_push_all(cold_gc_frame - sizeof(ptr_t), top); |
|
GC_push_all_eager(bottom, cold_gc_frame); |
# else /* STACK_GROWS_UP */ |
# else /* STACK_GROWS_UP */ |
GC_push_all_eager(cold_gc_frame, top); |
|
GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t)); |
GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t)); |
|
GC_push_all_eager(cold_gc_frame, top); |
# endif /* STACK_GROWS_UP */ |
# endif /* STACK_GROWS_UP */ |
# else |
} else { |
GC_push_all_eager(bottom, top); |
GC_push_all_eager(bottom, top); |
# endif |
} |
# ifdef TRACE_BUF |
# ifdef TRACE_BUF |
GC_add_trace_entry("GC_push_all_stack", bottom, top); |
GC_add_trace_entry("GC_push_all_stack", bottom, top); |
# endif |
# endif |
Line 998 void GC_push_all_stack(bottom, top) |
|
Line 1388 void GC_push_all_stack(bottom, top) |
|
ptr_t bottom; |
ptr_t bottom; |
ptr_t top; |
ptr_t top; |
{ |
{ |
# ifdef ALL_INTERIOR_POINTERS |
if (GC_all_interior_pointers) { |
GC_push_all(bottom, top); |
GC_push_all(bottom, top); |
# else |
} else { |
GC_push_all_eager(bottom, top); |
GC_push_all_eager(bottom, top); |
# endif |
} |
} |
} |
|
|
#ifndef SMALL_CONFIG |
#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) |
/* Push all objects reachable from marked objects in the given block */ |
/* Push all objects reachable from marked objects in the given block */ |
/* of size 1 objects. */ |
/* of size 1 objects. */ |
void GC_push_marked1(h, hhdr) |
void GC_push_marked1(h, hhdr) |
struct hblk *h; |
struct hblk *h; |
register hdr * hhdr; |
register hdr * hhdr; |
{ |
{ |
word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]); |
word * mark_word_addr = &(hhdr->hb_marks[0]); |
register word *p; |
register word *p; |
word *plim; |
word *plim; |
register int i; |
register int i; |
Line 1020 register hdr * hhdr; |
|
Line 1410 register hdr * hhdr; |
|
register word mark_word; |
register word mark_word; |
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr; |
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr; |
register ptr_t least_ha = GC_least_plausible_heap_addr; |
register ptr_t least_ha = GC_least_plausible_heap_addr; |
|
register mse * mark_stack_top = GC_mark_stack_top; |
|
register mse * mark_stack_limit = GC_mark_stack_limit; |
|
# define GC_mark_stack_top mark_stack_top |
|
# define GC_mark_stack_limit mark_stack_limit |
# define GC_greatest_plausible_heap_addr greatest_ha |
# define GC_greatest_plausible_heap_addr greatest_ha |
# define GC_least_plausible_heap_addr least_ha |
# define GC_least_plausible_heap_addr least_ha |
|
|
Line 1042 register hdr * hhdr; |
|
Line 1436 register hdr * hhdr; |
|
} |
} |
# undef GC_greatest_plausible_heap_addr |
# undef GC_greatest_plausible_heap_addr |
# undef GC_least_plausible_heap_addr |
# undef GC_least_plausible_heap_addr |
|
# undef GC_mark_stack_top |
|
# undef GC_mark_stack_limit |
|
GC_mark_stack_top = mark_stack_top; |
} |
} |
|
|
|
|
Line 1053 void GC_push_marked2(h, hhdr) |
|
Line 1450 void GC_push_marked2(h, hhdr) |
|
struct hblk *h; |
struct hblk *h; |
register hdr * hhdr; |
register hdr * hhdr; |
{ |
{ |
word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]); |
word * mark_word_addr = &(hhdr->hb_marks[0]); |
register word *p; |
register word *p; |
word *plim; |
word *plim; |
register int i; |
register int i; |
Line 1061 register hdr * hhdr; |
|
Line 1458 register hdr * hhdr; |
|
register word mark_word; |
register word mark_word; |
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr; |
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr; |
register ptr_t least_ha = GC_least_plausible_heap_addr; |
register ptr_t least_ha = GC_least_plausible_heap_addr; |
|
register mse * mark_stack_top = GC_mark_stack_top; |
|
register mse * mark_stack_limit = GC_mark_stack_limit; |
|
# define GC_mark_stack_top mark_stack_top |
|
# define GC_mark_stack_limit mark_stack_limit |
# define GC_greatest_plausible_heap_addr greatest_ha |
# define GC_greatest_plausible_heap_addr greatest_ha |
# define GC_least_plausible_heap_addr least_ha |
# define GC_least_plausible_heap_addr least_ha |
|
|
Line 1085 register hdr * hhdr; |
|
Line 1486 register hdr * hhdr; |
|
} |
} |
# undef GC_greatest_plausible_heap_addr |
# undef GC_greatest_plausible_heap_addr |
# undef GC_least_plausible_heap_addr |
# undef GC_least_plausible_heap_addr |
|
# undef GC_mark_stack_top |
|
# undef GC_mark_stack_limit |
|
GC_mark_stack_top = mark_stack_top; |
} |
} |
|
|
/* Push all objects reachable from marked objects in the given block */ |
/* Push all objects reachable from marked objects in the given block */ |
Line 1095 void GC_push_marked4(h, hhdr) |
|
Line 1499 void GC_push_marked4(h, hhdr) |
|
struct hblk *h; |
struct hblk *h; |
register hdr * hhdr; |
register hdr * hhdr; |
{ |
{ |
word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]); |
word * mark_word_addr = &(hhdr->hb_marks[0]); |
register word *p; |
register word *p; |
word *plim; |
word *plim; |
register int i; |
register int i; |
Line 1103 register hdr * hhdr; |
|
Line 1507 register hdr * hhdr; |
|
register word mark_word; |
register word mark_word; |
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr; |
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr; |
register ptr_t least_ha = GC_least_plausible_heap_addr; |
register ptr_t least_ha = GC_least_plausible_heap_addr; |
|
register mse * mark_stack_top = GC_mark_stack_top; |
|
register mse * mark_stack_limit = GC_mark_stack_limit; |
|
# define GC_mark_stack_top mark_stack_top |
|
# define GC_mark_stack_limit mark_stack_limit |
# define GC_greatest_plausible_heap_addr greatest_ha |
# define GC_greatest_plausible_heap_addr greatest_ha |
# define GC_least_plausible_heap_addr least_ha |
# define GC_least_plausible_heap_addr least_ha |
|
|
Line 1131 register hdr * hhdr; |
|
Line 1539 register hdr * hhdr; |
|
} |
} |
# undef GC_greatest_plausible_heap_addr |
# undef GC_greatest_plausible_heap_addr |
# undef GC_least_plausible_heap_addr |
# undef GC_least_plausible_heap_addr |
|
# undef GC_mark_stack_top |
|
# undef GC_mark_stack_limit |
|
GC_mark_stack_top = mark_stack_top; |
} |
} |
|
|
#endif /* UNALIGNED */ |
#endif /* UNALIGNED */ |
Line 1148 register hdr * hhdr; |
|
Line 1559 register hdr * hhdr; |
|
register int word_no; |
register int word_no; |
register word * lim; |
register word * lim; |
register mse * GC_mark_stack_top_reg; |
register mse * GC_mark_stack_top_reg; |
register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]); |
register mse * mark_stack_limit = GC_mark_stack_limit; |
|
|
/* Some quick shortcuts: */ |
/* Some quick shortcuts: */ |
if ((0 | DS_LENGTH) == descr) return; |
if ((0 | GC_DS_LENGTH) == descr) return; |
if (GC_block_empty(hhdr)/* nothing marked */) return; |
if (GC_block_empty(hhdr)/* nothing marked */) return; |
# ifdef GATHERSTATS |
GC_n_rescuing_pages++; |
GC_n_rescuing_pages++; |
|
# endif |
|
GC_objects_are_marked = TRUE; |
GC_objects_are_marked = TRUE; |
if (sz > MAXOBJSZ) { |
if (sz > MAXOBJSZ) { |
lim = (word *)h + HDR_WORDS; |
lim = (word *)h; |
} else { |
} else { |
lim = (word *)(h + 1) - sz; |
lim = (word *)(h + 1) - sz; |
} |
} |
|
|
switch(sz) { |
switch(sz) { |
# if !defined(SMALL_CONFIG) |
# if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) |
case 1: |
case 1: |
GC_push_marked1(h, hhdr); |
GC_push_marked1(h, hhdr); |
break; |
break; |
# endif |
# endif |
# if !defined(SMALL_CONFIG) && !defined(UNALIGNED) |
# if !defined(SMALL_CONFIG) && !defined(UNALIGNED) && \ |
|
!defined(USE_MARK_BYTES) |
case 2: |
case 2: |
GC_push_marked2(h, hhdr); |
GC_push_marked2(h, hhdr); |
break; |
break; |
Line 1179 register hdr * hhdr; |
|
Line 1589 register hdr * hhdr; |
|
# endif |
# endif |
default: |
default: |
GC_mark_stack_top_reg = GC_mark_stack_top; |
GC_mark_stack_top_reg = GC_mark_stack_top; |
for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim; |
for (p = (word *)h, word_no = 0; p <= lim; p += sz, word_no += sz) { |
p += sz, word_no += sz) { |
|
if (mark_bit_from_hdr(hhdr, word_no)) { |
if (mark_bit_from_hdr(hhdr, word_no)) { |
/* Mark from fields inside the object */ |
/* Mark from fields inside the object */ |
PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit); |
PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit); |
Line 1207 register hdr * hhdr; |
|
Line 1616 register hdr * hhdr; |
|
return(GC_page_was_dirty(h)); |
return(GC_page_was_dirty(h)); |
} else { |
} else { |
register ptr_t p = (ptr_t)h; |
register ptr_t p = (ptr_t)h; |
sz += HDR_WORDS; |
|
sz = WORDS_TO_BYTES(sz); |
sz = WORDS_TO_BYTES(sz); |
while (p < (ptr_t)h + sz) { |
while (p < (ptr_t)h + sz) { |
if (GC_page_was_dirty((struct hblk *)p)) return(TRUE); |
if (GC_page_was_dirty((struct hblk *)p)) return(TRUE); |