version 1.1.1.1, 1999/11/27 10:58:32
version 1.1.1.2, 2000/04/14 11:07:58
Line 87

  struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {

  # define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
        /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
        /* multiple of HBLKSIZE. */
+       /* The incremental collector actually likes a larger */
+       /* size, since it wants to push all marked dirty objs */
+       /* before marking anything new.  Currently we let it */
+       /* grow dynamically. */
  # endif

  /*
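The comments added above spell out that INITIAL_MARK_STACK_SIZE is only a starting point: when the incremental collector finds the stack too cramped during a cycle (see the GC_mark_stack_too_small flag in the next hunk), it enlarges the stack before the next cycle rather than raising the initial size. A minimal sketch of such a doubling policy follows; the names (mark_stack, maybe_grow_mark_stack) and the simplified mse layout are illustrative placeholders, not the collector's actual routines.

    #include <stdlib.h>

    typedef struct { char *mse_start; size_t mse_len; } mse;  /* simplified stack entry */

    static mse   *mark_stack;            /* current stack (placeholder globals) */
    static size_t mark_stack_entries;    /* capacity, in entries */
    static int    mark_stack_too_small;  /* set during a cycle when nearly full */

    /* Between collections: if the previous cycle flagged the stack as too   */
    /* small, replace it with one twice the size.  Growing dynamically beats */
    /* repeatedly draining a cramped stack while dirty objects are pushed.   */
    static void maybe_grow_mark_stack(void)
    {
        size_t new_entries;
        mse *new_stack;

        if (!mark_stack_too_small) return;
        new_entries = 2 * mark_stack_entries;
        new_stack = (mse *)malloc(new_entries * sizeof(mse));
        if (new_stack != 0) {
            free(mark_stack);
            mark_stack = new_stack;
            mark_stack_entries = new_entries;
        }
        mark_stack_too_small = 0;
    }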
Line 254 (1.1.1.1) / Line 258 (1.1.1.2)

  ptr_t cold_gc_frame;

      case MS_PUSH_RESCUERS:
        if (GC_mark_stack_top
-           >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
+           >= GC_mark_stack + GC_mark_stack_size
+              - INITIAL_MARK_STACK_SIZE/2) {
+           /* Go ahead and mark, even though that might cause us to */
+           /* see more marked dirty objects later on.  Avoid this   */
+           /* in the future.                                        */
+           GC_mark_stack_too_small = TRUE;
            GC_mark_from_mark_stack();
            return(FALSE);
        } else {
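The new guard above changes when the MS_PUSH_RESCUERS state stops pushing and drains the stack: 1.1.1.1 drained as soon as a quarter of the initial capacity was occupied, while 1.1.1.2 waits until the current (possibly already grown) stack has fewer than INITIAL_MARK_STACK_SIZE/2 free entries left, and records the shortfall in GC_mark_stack_too_small so a larger stack is used later. A sketch of the two conditions, with placeholder variables standing in for the collector's globals and all sizes expressed in entries:

    #include <stddef.h>

    typedef struct { char *mse_start; size_t mse_len; } mse;  /* simplified stack entry */

    static mse   *mark_stack;        /* base of the mark stack (placeholder) */
    static mse   *mark_stack_top;    /* most recently used entry             */
    static size_t mark_stack_size;   /* current capacity, in entries         */

    #define INITIAL_ENTRIES 1024     /* stands in for INITIAL_MARK_STACK_SIZE */

    /* 1.1.1.1: drain once a quarter of the *initial* capacity is in use,   */
    /* no matter how large the stack has actually become.                   */
    static int should_drain_old(void)
    {
        return mark_stack_top >= mark_stack + INITIAL_ENTRIES/4;
    }

    /* 1.1.1.2: drain only when the *current* stack is nearly full, i.e.    */
    /* fewer than half the initial capacity remains free; the caller also   */
    /* sets the too-small flag so the stack grows before the next cycle.    */
    static int should_drain_new(void)
    {
        return mark_stack_top >= mark_stack + mark_stack_size - INITIAL_ENTRIES/2;
    }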
|
|
  # endif
  word p;
  {
+ #   ifdef NURSERY
+     if (0 != GC_push_proc) {
+       GC_push_proc(p);
+       return;
+     }
+ #   endif
      GC_PUSH_ONE_STACK(p, 0);
  }
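Under NURSERY, GC_push_one now defers to a registered hook, GC_push_proc, before falling back to the ordinary push, which appears intended to let a nursery (generational) collector intercept candidate pointers itself. A sketch of how such a hook might be installed and used; push_proc_t, set_push_proc and nursery_push are illustrative names, not part of the collector's interface:

    #include <stdio.h>

    typedef unsigned long word;               /* matches the old-style word argument */
    typedef void (*push_proc_t)(word p);

    static push_proc_t push_proc = 0;         /* stands in for GC_push_proc */

    /* A nursery collector registers its handler once, before collections start. */
    static void set_push_proc(push_proc_t proc) { push_proc = proc; }

    /* Example hook: a real nursery would test whether p falls inside the     */
    /* nursery region and remember it; here we merely log the candidate.      */
    static void nursery_push(word p)
    {
        printf("nursery saw candidate pointer 0x%lx\n", p);
    }

    /* Mirrors the structure of the patched GC_push_one: when the hook is set */
    /* it takes over entirely; otherwise the normal mark-stack push runs.     */
    static void push_one(word p)
    {
        if (0 != push_proc) {
            push_proc(p);
            return;
        }
        /* ... ordinary GC_PUSH_ONE_STACK(p, 0) path would go here ... */
    }

    int main(void)
    {
        set_push_proc(nursery_push);
        push_one((word)0xdeadbeefUL);
        return 0;
    }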
|
|
|
|
  # endif

  /* As above, but argument passed preliminary test. */
- # ifdef PRINT_BLACK_LIST
+ # if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    void GC_push_one_checked(p, interior_ptrs, source)
    ptr_t source;
  # else
Line 744 (1.1.1.1) / Line 759 (1.1.1.2)

  register GC_bool interior_ptrs;

      } else {
          if (!mark_bit_from_hdr(hhdr, displ)) {
              set_mark_bit_from_hdr(hhdr, displ);
+             GC_STORE_BACK_PTR(source, (ptr_t)r);
              PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
                       &(GC_mark_stack[GC_mark_stack_size]));
          }
      }
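With KEEP_BACK_PTRS defined, GC_push_one_checked now records, at the moment an object is first marked, the address of the pointer that reached it, so a debugger can later ask what keeps a given object alive. The sketch below illustrates the idea behind a GC_STORE_BACK_PTR-style side table; the names (back_ptr_table, store_back_ptr, get_back_ptr) and the single-slot hash are illustrative only, and the collector's real bookkeeping differs.

    #include <stddef.h>

    #define BACK_PTR_SLOTS 4096              /* illustrative table size */

    /* At most one remembered referrer per object base address. */
    static struct { void *object; void *source; } back_ptr_table[BACK_PTR_SLOTS];

    static size_t slot_for(void *object)
    {
        return ((size_t)object >> 3) % BACK_PTR_SLOTS;   /* naive hash */
    }

    /* Called when an object is first marked: remember where the pointer     */
    /* that reached it was found (a root, a register, or another object).    */
    static void store_back_ptr(void *source, void *object)
    {
        size_t i = slot_for(object);
        back_ptr_table[i].object = object;
        back_ptr_table[i].source = source;
    }

    /* Debugging query: the recorded referrer, or 0 if unknown or evicted.   */
    static void *get_back_ptr(void *object)
    {
        size_t i = slot_for(object);
        return back_ptr_table[i].object == object ? back_ptr_table[i].source : 0;
    }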
Line 1102 (1.1.1.1) / Line 1118 (1.1.1.2)

  struct hblk *h;

  {
      register hdr * hhdr;

-     h = GC_next_block(h);
+     h = GC_next_used_block(h);
      if (h == 0) return(0);
      hhdr = HDR(h);
      GC_push_marked(h, hhdr);
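The remaining hunks all pick up the rename of GC_next_block to GC_next_used_block, which makes explicit that the iterator skips heap blocks currently sitting on a free list and hands back only blocks holding object data. A sketch of that kind of iterator over a hypothetical array of block descriptors (block_desc and next_used_block are made-up names):

    #include <stddef.h>

    struct block_desc {
        int   in_use;      /* zero while the block sits on a free list */
        void *start;       /* first address covered by the block       */
    };

    #define N_BLOCKS 256
    static struct block_desc heap_blocks[N_BLOCKS];   /* illustrative heap map */

    /* Return the index of the first in-use block at or after i, or -1 when  */
    /* the end of the heap is reached; callers advance by passing i + 1.     */
    static int next_used_block(int i)
    {
        for (; i < N_BLOCKS; i++) {
            if (heap_blocks[i].in_use) return i;
        }
        return -1;
    }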
Line 1114 (1.1.1.1) / Line 1130 (1.1.1.2)

  struct hblk *h;

  struct hblk * GC_push_next_marked_dirty(h)
  struct hblk *h;
  {
-     register hdr * hhdr = HDR(h);
+     register hdr * hhdr;

      if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
      for (;;) {
-         h = GC_next_block(h);
+         h = GC_next_used_block(h);
          if (h == 0) return(0);
          hhdr = HDR(h);
  # ifdef STUBBORN_ALLOC
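GC_push_next_marked_dirty also stops initializing hhdr from the incoming block, since the loop always reassigns it after advancing to the next used block. The routine itself, as its name and the GC_dirty_maintained check suggest, limits incremental rescanning to blocks whose pages were written since the last scan. A sketch of that filter, with page_was_dirty standing in for the collector's dirty-page query:

    #include <stddef.h>

    struct blk { int has_marked_objs; int dirty; };

    #define N_BLOCKS 256
    static struct blk heap[N_BLOCKS];            /* illustrative heap map */

    /* Hypothetical stand-in for the collector's virtual-dirty-bit query.     */
    static int page_was_dirty(const struct blk *b) { return b->dirty; }

    /* Incremental rescan: only blocks that hold marked objects and were      */
    /* written since the last scan need their contents pushed again.          */
    static int next_block_to_rescan(int i)
    {
        for (; i < N_BLOCKS; i++) {
            if (heap[i].has_marked_objs && page_was_dirty(&heap[i])) return i;
        }
        return -1;
    }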
Line 1147 (1.1.1.1) / Line 1163 (1.1.1.2)

  struct hblk *h;

      register hdr * hhdr = HDR(h);

      for (;;) {
-         h = GC_next_block(h);
+         h = GC_next_used_block(h);
          if (h == 0) return(0);
          hhdr = HDR(h);
          if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
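This last loop selects blocks of the UNCOLLECTABLE kind: such objects are never reclaimed by the collector, but they may hold the only references to collectable ones, so they are still traversed. A brief sketch of the kind filter; obj_kind_t, UNCOLLECTABLE_KIND and next_uncollectable_block are illustrative names:

    #include <stddef.h>

    typedef enum { PTRFREE_KIND, NORMAL_KIND, UNCOLLECTABLE_KIND } obj_kind_t;

    struct kind_blk { obj_kind_t kind; };

    #define N_BLOCKS 256
    static struct kind_blk heap[N_BLOCKS];       /* illustrative heap map */

    /* Uncollectable blocks are scanned for outgoing pointers (they behave   */
    /* like roots) even though their own storage is never reclaimed.         */
    static int next_uncollectable_block(int i)
    {
        for (; i < N_BLOCKS; i++) {
            if (heap[i].kind == UNCOLLECTABLE_KIND) return i;
        }
        return -1;
    }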