OpenXM_contrib2/asir2000/gc/mark.c, Revision 1.2

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */


# include <stdio.h>
# include "gc_priv.h"
# include "gc_mark.h"

/* We put this here to minimize the risk of inlining. */
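/* GC_noop is intended to be called with live pointer values as     */
/* arguments; keeping its definition here, out of line, discourages */
/* compilers from inlining the call and optimizing those arguments  */
/* away.                                                            */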
/*VARARGS*/
#ifdef __WATCOMC__
  void GC_noop(void *p, ...) {}
#else
  void GC_noop() {}
#endif

/* Single argument version, robust against whole program analysis. */
void GC_noop1(x)
word x;
{
    static VOLATILE word sink;

    sink = x;
}

/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

word GC_n_mark_procs = 0;

/* Initialize GC_obj_kinds and the standard free lists.            */
/* This must be done statically since they may be accessed before  */
/* GC_init is called.                                               */
/* It's done here, since we need to deal with mark descriptors.     */
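/* The third field of each entry is the mark descriptor; with a     */
/* DS_LENGTH tag it encodes an object length in bytes (see          */
/* gc_mark.h), and ok_relocate_descr tells the marker whether the   */
/* actual object length must be added to it.                        */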
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | DS_LENGTH, FALSE, FALSE },
/* NORMAL */  { &GC_objfreelist[0], 0,
#               if defined(ADD_BYTE_AT_END) && ALIGNMENT > DS_TAGS
                (word)(-ALIGNMENT) | DS_LENGTH,
#               else
                0 | DS_LENGTH,
#               endif
                TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
              { &GC_uobjfreelist[0], 0,
                0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
              { &GC_auobjfreelist[0], 0,
                0 | DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/  { &GC_sobjfreelist[0], 0,
                0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};

# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 5;
#   else
      int GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 4;
#   else
      int GC_n_kinds = 3;
#   endif
# endif


# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
        /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a  */
        /* multiple of HBLKSIZE.                              */
        /* The incremental collector actually likes a larger  */
        /* size, since it wants to push all marked dirty objs */
        /* before marking anything new.  Currently we let it  */
        /* grow dynamically.                                  */
# endif

/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.)
 * still need to be marked from.
 */

word GC_n_rescuing_pages;       /* Number of dirty pages we marked from */
                                /* excludes ptrfree pages, etc.          */

mse * GC_mark_stack;

word GC_mark_stack_size = 0;

mse * GC_mark_stack_top;

static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;  /* Are there collectable marked */
                                        /* objects in the heap?         */

/* Is a collection in progress?  Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the     */
/* mark state is now MS_INVALID.                                       */
GC_bool GC_collection_in_progress()
{
    return(GC_mark_state != MS_NONE);
}

/* Clear all mark bits in the header. */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
    BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
}

/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
register hdr * hhdr;
{
    register int i;

    for (i = 0; i < MARK_BITS_SZ; ++i) {
        hhdr -> hb_marks[i] = ONES;
    }
}

/*
 * Clear all mark bits associated with block h.
 */
/*ARGSUSED*/
static void clear_marks_for_block(h, dummy)
struct hblk *h;
word dummy;
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is     */
        /* explicitly deallocated.  This either frees the block, or  */
        /* the bit is cleared once the object is on the free list.   */
    GC_clear_hdr_marks(hhdr);
}

/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    set_mark_bit_from_hdr(hhdr, word_no);
}

void GC_clear_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    clear_mark_bit_from_hdr(hhdr, word_no);
}

GC_bool GC_is_marked(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    return(mark_bit_from_hdr(hhdr, word_no));
}



/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;
#   ifdef GATHERSTATS
        /* Counters reflect currently marked objects: reset here */
        GC_composite_in_use = 0;
        GC_atomic_in_use = 0;
#   endif

}

/* Initiate a garbage collection.  Initiates a full collection if the  */
/* mark state is invalid.                                              */
/*ARGSUSED*/
void GC_initiate_gc()
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
        GC_read_changed();
#   endif
#   ifdef CHECKSUMS
        {
            extern void GC_check_dirty();

            if (GC_dirty_maintained) GC_check_dirty();
        }
#   endif
#   ifdef GATHERSTATS
        GC_n_rescuing_pages = 0;
#   endif
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark */
      /* bits are invalid.                               */
    scan_ptr = 0;
}


static void alloc_mark_stack();

/* Perform a small amount of marking.                   */
/* We try to touch roughly a page of memory.            */
/* Return TRUE if we just finished a mark phase.        */
/* Cold_gc_frame is an address inside a GC frame that   */
/* remains valid until all marking is complete.         */
/* A zero value indicates that it's OK to miss some     */
/* register values.                                     */
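/* The marker is a state machine driven by GC_mark_state: marked dirty */
/* pages are pushed first (MS_PUSH_RESCUERS), then the roots, after    */
/* which the mark stack is drained (MS_ROOTS_PUSHED).  The MS_INVALID  */
/* states force a rescan of the whole heap following an abandoned or   */
/* overflowed mark phase.                                              */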
GC_bool GC_mark_some(cold_gc_frame)
ptr_t cold_gc_frame;
{
    switch(GC_mark_state) {
        case MS_NONE:
            return(FALSE);

        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack + GC_mark_stack_size
                   - INITIAL_MARK_STACK_SIZE/2) {
                /* Go ahead and mark, even though that might cause us to */
                /* see more marked dirty objects later on.  Avoid this   */
                /* in the future.                                        */
                GC_mark_stack_too_small = TRUE;
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                if (scan_ptr == 0) {
#                   ifdef PRINTSTATS
                        GC_printf1("Marked from %lu dirty pages\n",
                                   (unsigned long)GC_n_rescuing_pages);
#                   endif
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);

        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                if (scan_ptr == 0) {
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);

        case MS_ROOTS_PUSHED:
            if (GC_mark_stack_top >= GC_mark_stack) {
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                return(TRUE);
            }

        case MS_INVALID:
        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
                return(FALSE);
            }
            if (GC_mark_stack_top >= GC_mark_stack) {
                GC_mark_from_mark_stack();
                return(FALSE);
            }
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate.        */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                GC_mark_state = MS_PARTIALLY_INVALID;
            }
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;
                }
            }
            return(FALSE);

        default:
            ABORT("GC_mark_some: bad state");
            return(FALSE);
    }
}


GC_bool GC_mark_stack_empty()
{
    return(GC_mark_stack_top < GC_mark_stack);
}

#ifdef PROF_MARKER
    word GC_prof_array[10];
#   define PROF(n) GC_prof_array[n]++
#else
#   define PROF(n)
#endif

/* Given a pointer to someplace other than a small object page or the  */
/* first page of a large object, return a pointer either to the        */
/* start of the large object or NIL.  In the latter case, black        */
/* list the address held in current.                                   */
/* Returns NIL without black listing if current points to a block      */
/* with IGNORE_OFF_PAGE set.                                            */
/*ARGSUSED*/
# ifdef PRINT_BLACK_LIST
  word GC_find_start(current, hhdr, source)
  word source;
# else
  word GC_find_start(current, hhdr)
# define source 0
# endif
register word current;
register hdr * hhdr;
{
#   ifdef ALL_INTERIOR_POINTERS
      if (hhdr != 0) {
        register word orig = current;

        current = (word)HBLKPTR(current) + HDR_BYTES;
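        /* Headers of the non-first blocks of a large object are       */
        /* forwarding markers: each encodes the number of blocks to    */
        /* step back toward the object start, so follow them until a   */
        /* real header is found.                                       */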
        do {
            current = current - HBLKSIZE*(word)hhdr;
            hhdr = HDR(current);
        } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
        /* current points to the start of the large object */
        if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(0);
        if ((word *)orig - (word *)current
             >= (ptrdiff_t)(hhdr->hb_sz)) {
            /* Pointer past the end of the block */
            GC_ADD_TO_BLACK_LIST_NORMAL(orig, source);
            return(0);
        }
        return(current);
      } else {
        GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
        return(0);
      }
#   else
      GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
      return(0);
#   endif
#   undef source
}

void GC_invalidate_mark_state()
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;
}

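/* Handle mark stack overflow: discard the topmost entries and set the */
/* mark state to MS_INVALID, so that anything reachable through the    */
/* discarded entries is recovered later by rescanning the heap.        */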
mse * GC_signal_mark_stack_overflow(msp)
mse * msp;
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
#   ifdef PRINTSTATS
        GC_printf1("Mark stack overflow; current size = %lu entries\n",
                   GC_mark_stack_size);
#   endif
    return(msp - INITIAL_MARK_STACK_SIZE/8);
}


/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 */
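/*
 * Mark descriptor tags (see gc_mark.h): DS_LENGTH denotes a simple
 * range of the given byte length; DS_BITMAP gives a bitmap of the
 * words that may hold pointers; DS_PROC names a user-supplied mark
 * procedure; DS_PER_OBJECT means the real descriptor is stored
 * within the object itself.
 */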
void GC_mark_from_mark_stack()
{
  mse * GC_mark_stack_reg = GC_mark_stack;
  mse * GC_mark_stack_top_reg = GC_mark_stack_top;
  mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
  int credit = HBLKSIZE;      /* Remaining credit for marking work   */
  register word * current_p;  /* Pointer to current candidate ptr.   */
  register word current;      /* Candidate pointer.                  */
  register word * limit;      /* (Incl) limit of current candidate   */
                              /* range.                              */
  register word descr;
  register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  register ptr_t least_ha = GC_least_plausible_heap_addr;
# define SPLIT_RANGE_WORDS 128  /* Must be power of 2. */

  GC_objects_are_marked = TRUE;
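  /* Loop while the mark stack is nonempty and marking credit remains. */
  /* The non-OS2 form folds both signed tests into one comparison: the */
  /* bitwise OR is negative exactly when the stack-height difference   */
  /* or the credit has gone negative.                                  */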
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit >= 0) {
# else
  while ((((ptr_t)GC_mark_stack_top_reg - (ptr_t)GC_mark_stack_reg) | credit)
         >= 0) {
# endif
    current_p = GC_mark_stack_top_reg -> mse_start;
  retry:
    descr = GC_mark_stack_top_reg -> mse_descr;
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | DS_TAGS)) {
      word tag = descr & DS_TAGS;

      switch(tag) {
        case DS_LENGTH:
          /* Large length.                                              */
          /* Process part of the range to avoid pushing too much on the */
          /* stack.                                                     */
          GC_mark_stack_top_reg -> mse_start =
                limit = current_p + SPLIT_RANGE_WORDS-1;
          GC_mark_stack_top_reg -> mse_descr -=
                WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
          /* Make sure that pointers overlapping the two ranges are     */
          /* considered.                                                */
          limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
          break;
        case DS_BITMAP:
          GC_mark_stack_top_reg--;
          descr &= ~DS_TAGS;
          credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
          while (descr != 0) {
            if ((signed_word)descr < 0) {
              current = *current_p;
              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
                PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit,
                              current_p, exit1);
              }
            }
            descr <<= 1;
            ++ current_p;
          }
          continue;
        case DS_PROC:
          GC_mark_stack_top_reg--;
          credit -= PROC_BYTES;
          GC_mark_stack_top_reg =
              (*PROC(descr))
                    (current_p, GC_mark_stack_top_reg,
                     mark_stack_limit, ENV(descr));
          continue;
        case DS_PER_OBJECT:
          GC_mark_stack_top_reg -> mse_descr =
                *(word *)((ptr_t)current_p + descr - tag);
          goto retry;
      }
    } else {
      GC_mark_stack_top_reg--;
      limit = (word *)(((ptr_t)current_p) + (word)descr);
    }
    /* The simple case in which we're scanning a range. */
    credit -= (ptr_t)limit - (ptr_t)current_p;
    limit -= 1;
    while (current_p <= limit) {
      current = *current_p;
      if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
        PUSH_CONTENTS(current, GC_mark_stack_top_reg,
                      mark_stack_limit, current_p, exit2);
      }
      current_p = (word *)((char *)current_p + ALIGNMENT);
    }
  }
  GC_mark_stack_top = GC_mark_stack_top_reg;
}

/* Allocate or reallocate space for mark stack of size n entries.  */
/* May silently fail.                                              */
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
          word displ = (word)GC_mark_stack & (GC_page_size - 1);
          signed_word size = GC_mark_stack_size * sizeof(struct ms_entry);

          /* Recycle old space */
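          /* Scratch-allocated memory cannot be returned directly, so  */
          /* the page-aligned portion of the old stack is handed to    */
          /* the heap allocator instead.                               */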
          if (0 != displ) displ = GC_page_size - displ;
          size = (size - displ) & ~(GC_page_size - 1);
          if (size > 0) {
            GC_add_to_heap((struct hblk *)
                              ((word)GC_mark_stack + displ), (word)size);
          }
          GC_mark_stack = new_stack;
          GC_mark_stack_size = n;
#         ifdef PRINTSTATS
              GC_printf1("Grew mark stack to %lu frames\n",
                         (unsigned long) GC_mark_stack_size);
#         endif
        } else {
#         ifdef PRINTSTATS
              GC_printf1("Failed to grow mark stack to %lu frames\n",
                         (unsigned long) n);
#         endif
        }
    } else {
        if (new_stack == 0) {
            GC_err_printf0("No space for mark stack\n");
            EXIT();
        }
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
    }
    GC_mark_stack_top = GC_mark_stack-1;
}

void GC_mark_init()
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}

/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked.  t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(bottom, top)
ptr_t bottom;
ptr_t top;
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
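    /* The low DS_TAGS bits of a descriptor hold its tag; DS_LENGTH is */
    /* zero, so rounding the length up keeps the tag bits clear.       */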
#   if DS_TAGS > ALIGNMENT - 1
        length += DS_TAGS;
        length &= ~DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
}

/*
 * Analogous to the above, but push only those pages that may have been
 * dirtied.  A block h is assumed dirty if dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_dirty(bottom, top, dirty_fn, push_fn)
ptr_t bottom;
ptr_t top;
int (*dirty_fn)(/* struct hblk * h */);
void (*push_fn)(/* ptr_t bottom, ptr_t top */);
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));

    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
        }
        return;
    }
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }
    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow */
                (*push_fn)((ptr_t)h, top);
                return;
            } else {
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
            }
        }
        h++;
    }
    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
        }
    }
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
    }
}

# ifndef SMALL_CONFIG
void GC_push_conditional(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    if (all) {
      if (GC_dirty_maintained) {
#       ifdef PROC_VDB
            /* Pages that were never dirtied cannot contain pointers */
            GC_push_dirty(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#       else
            GC_push_all(bottom, top);
#       endif
      } else {
        GC_push_all(bottom, top);
      }
    } else {
        GC_push_dirty(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
#endif

# ifdef MSWIN32
  void __cdecl GC_push_one(p)
# else
  void GC_push_one(p)
# endif
word p;
{
#   ifdef NURSERY
      if (0 != GC_push_proc) {
        GC_push_proc(p);
        return;
      }
#   endif
    GC_PUSH_ONE_STACK(p, 0);
}

# ifdef __STDC__
#   define BASE(p) (word)GC_base((void *)(p))
# else
#   define BASE(p) (word)GC_base((char *)(p))
# endif

/* As above, but the argument has already passed a preliminary test. */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    void GC_push_one_checked(p, interior_ptrs, source)
    ptr_t source;
# else
    void GC_push_one_checked(p, interior_ptrs)
#   define source 0
# endif
register word p;
register GC_bool interior_ptrs;
{
    register word r;
    register hdr * hhdr;
    register int displ;

    GET_HDR(p, hhdr);
    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        if (hhdr != 0 && interior_ptrs) {
            r = BASE(p);
            hhdr = HDR(r);
            displ = BYTES_TO_WORDS(HBLKDISPL(r));
        } else {
            hhdr = 0;
        }
    } else {
        register map_entry_type map_entry;

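        /* hb_map records, for each valid word offset within the      */
        /* block, that offset's word displacement from the start of   */
        /* its object; subtracting it recovers the object base.       */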
        displ = HBLKDISPL(p);
        map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
        if (map_entry == OBJ_INVALID) {
#           ifndef ALL_INTERIOR_POINTERS
              if (interior_ptrs) {
                r = BASE(p);
                displ = BYTES_TO_WORDS(HBLKDISPL(r));
                if (r == 0) hhdr = 0;
              } else {
                hhdr = 0;
              }
#           else
              /* map already reflects interior pointers */
              hhdr = 0;
#           endif
        } else {
            displ = BYTES_TO_WORDS(displ);
            displ -= map_entry;
            r = (word)((word *)(HBLKPTR(p)) + displ);
        }
    }
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.                 */
    if (hhdr == 0) {
        if (interior_ptrs) {
#           ifdef PRINT_BLACK_LIST
              GC_add_to_black_list_stack(p, source);
#           else
              GC_add_to_black_list_stack(p);
#           endif
        } else {
            GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
#           undef source  /* In case we had to define it. */
        }
    } else {
        if (!mark_bit_from_hdr(hhdr, displ)) {
            set_mark_bit_from_hdr(hhdr, displ);
            GC_STORE_BACK_PTR(source, (ptr_t)r);
            PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
                     &(GC_mark_stack[GC_mark_stack_size]));
        }
    }
}

# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word words_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}

void GC_print_trace(word gc_no, GC_bool lock)
{
    int i;
    struct trace_entry *p;

    if (lock) LOCK();
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
        if (i < 0) i = TRACE_ENTRIES-1;
        p = GC_trace_buf + i;
        if (p -> gc_no < gc_no || p -> kind == 0) return;
        printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
                p -> kind, p -> gc_no, p -> words_allocd,
                (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    }
    printf("Trace incomplete\n");
    if (lock) UNLOCK();
}

# endif /* TRACE_BUF */

/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(bottom, top)
ptr_t bottom;
ptr_t top;
{
    word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
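    /* Shadow the globals with the register copies above, so that the */
    /* GC_PUSH_ONE_STACK macro below uses the cached values.          */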
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* Check all pointers in range and push them if they appear */
    /* to be valid.                                             */
    lim = t - 1 /* longword */;
    for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
        q = *p;
        GC_PUSH_ONE_STACK(q, p);
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

#ifndef THREADS
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 */
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t bottom;
ptr_t top;
ptr_t cold_gc_frame;
{
# ifdef ALL_INTERIOR_POINTERS
#   define EAGER_BYTES 1024
    /* Push the hot end of the stack eagerly, so that register values */
    /* saved inside GC frames are marked before they disappear.       */
    /* The rest of the marking can be deferred until later.           */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
#   ifdef STACK_GROWS_DOWN
        GC_push_all_eager(bottom, cold_gc_frame);
        GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
#   else /* STACK_GROWS_UP */
        GC_push_all_eager(cold_gc_frame, top);
        GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
#   endif /* STACK_GROWS_UP */
# else
    GC_push_all_eager(bottom, top);
# endif
# ifdef TRACE_BUF
      GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
#endif /* !THREADS */

void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
# ifdef ALL_INTERIOR_POINTERS
    GC_push_all(bottom, top);
# else
    GC_push_all_eager(bottom, top);
# endif
}

#ifndef SMALL_CONFIG
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects.                                                 */
void GC_push_marked1(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

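    /* Each mark word covers WORDSZ consecutive words of the block;   */
    /* for one-word objects, bit i set means the word at p[i] is a    */
    /* marked object that should be scanned.                          */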
    /* go through all words in block */
    while( p < plim )  {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
          if (mark_word & 1) {
              q = p[i];
              GC_PUSH_ONE_HEAP(q, p + i);
          }
          i++;
          mark_word >>= 1;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}


#ifndef UNALIGNED

/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects.                                                 */
void GC_push_marked2(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim )  {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
          if (mark_word & 1) {
              q = p[i];
              GC_PUSH_ONE_HEAP(q, p + i);
              q = p[i+1];
              GC_PUSH_ONE_HEAP(q, p + i + 1);
          }
          i += 2;
          mark_word >>= 2;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

/* Push all objects reachable from marked objects in the given block */
/* of size 4 objects.                                                 */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim )  {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
          if (mark_word & 1) {
              q = p[i];
              GC_PUSH_ONE_HEAP(q, p + i);
              q = p[i+1];
              GC_PUSH_ONE_HEAP(q, p + i + 1);
              q = p[i+2];
              GC_PUSH_ONE_HEAP(q, p + i + 2);
              q = p[i+3];
              GC_PUSH_ONE_HEAP(q, p + i + 3);
          }
          i += 4;
          mark_word >>= 4;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

#endif /* !UNALIGNED */

#endif /* SMALL_CONFIG */

/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;
    register word * p;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);

    /* Some quick shortcuts: */
    {
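        /* A kind whose descriptor is a constant zero length, and is */
        /* never adjusted per object (e.g. PTRFREE), contains no     */
        /* pointers, so there is nothing to push.                     */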
        struct obj_kind *ok = &(GC_obj_kinds[hhdr -> hb_obj_kind]);
        if ((0 | DS_LENGTH) == ok -> ok_descriptor
            && FALSE == ok -> ok_relocate_descr)
            return;
    }
    if (GC_block_empty(hhdr)/* nothing marked */) return;
#   ifdef GATHERSTATS
        GC_n_rescuing_pages++;
#   endif
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJSZ) {
        lim = (word *)(h + 1);
    } else {
        lim = (word *)(h + 1) - sz;
    }

    switch(sz) {
#   if !defined(SMALL_CONFIG)
     case 1:
       GC_push_marked1(h, hhdr);
       break;
#   endif
#   if !defined(SMALL_CONFIG) && !defined(UNALIGNED)
     case 2:
       GC_push_marked2(h, hhdr);
       break;
     case 4:
       GC_push_marked4(h, hhdr);
       break;
#   endif
     default:
      GC_mark_stack_top_reg = GC_mark_stack_top;
      for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
           p += sz, word_no += sz) {
         /* This ignores user specified mark procs.  This currently   */
         /* doesn't matter, since marking from the whole object       */
         /* is always sufficient, and we will eventually use the user */
         /* mark proc to avoid any bogus pointers.                    */
         if (mark_bit_from_hdr(hhdr, word_no)) {
           /* Mark from fields inside the object */
             PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
#            ifdef GATHERSTATS
                /* Subtract this object from total, since it was */
                /* added in twice.                                */
                GC_composite_in_use -= sz;
#            endif
         }
      }
      GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}

#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;

    if (sz < MAXOBJSZ) {
         return(GC_page_was_dirty(h));
    } else {
         register ptr_t p = (ptr_t)h;
         sz += HDR_WORDS;
         sz = WORDS_TO_BYTES(sz);
         while (p < (ptr_t)h + sz) {
             if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
             p += HBLKSIZE;
         }
         return(FALSE);
    }
}
#endif /* SMALL_CONFIG */

/* Similar to GC_push_marked, but return the address of the next block. */
struct hblk * GC_push_next_marked(h)
struct hblk *h;
{
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    hhdr = HDR(h);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}

#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages */
struct hblk * GC_push_next_marked_dirty(h)
struct hblk *h;
{
    register hdr * hhdr;

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
#       ifdef STUBBORN_ALLOC
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                break;
            }
          } else {
            if (GC_block_was_dirty(h, hhdr)) break;
          }
#       else
          if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif

/* Similar to above, but for uncollectable pages.  Needed since we */
/* do not clear marks for such pages, even for full collections.   */
struct hblk * GC_push_next_marked_uncollectable(h)
struct hblk *h;
{
    register hdr * hhdr = HDR(h);

    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
