/* OpenXM_contrib2/asir2000/gc/mark.c, Revision 1.1.1.1 */

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

# include <stdio.h>
# include "gc_priv.h"
# include "gc_mark.h"

/* We put this here to minimize the risk of inlining. */
/*VARARGS*/
#ifdef __WATCOMC__
  void GC_noop(void *p, ...) {}
#else
  void GC_noop() {}
#endif

/* Single argument version, robust against whole program analysis. */
void GC_noop1(x)
word x;
{
    static VOLATILE word sink;

    sink = x;
}

/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

word GC_n_mark_procs = 0;

/* Initialize GC_obj_kinds and the standard free lists.        */
/* This must be done statically, since they may be accessed    */
/* before GC_init is called.                                   */
/* It's done here, since we need to deal with mark descriptors. */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | DS_LENGTH, FALSE, FALSE },
/* NORMAL */  { &GC_objfreelist[0], 0,
#               if defined(ADD_BYTE_AT_END) && ALIGNMENT > DS_TAGS
                  (word)(-ALIGNMENT) | DS_LENGTH,
#               else
                  0 | DS_LENGTH,
#               endif
                TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
              { &GC_uobjfreelist[0], 0,
                0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
              { &GC_auobjfreelist[0], 0,
                0 | DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/* STUBBORN */ { &GC_sobjfreelist[0], 0,
                0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};
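
/*
 * Each initializer above supplies, in order: the free list, a slot
 * filled in dynamically (the reclaim list), the mark descriptor
 * (ok_descriptor), whether the object length must be added to the
 * descriptor for each object (ok_relocate_descr; see GC_push_marked),
 * and, apparently, whether objects of this kind are cleared on
 * allocation.
 */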

# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 5;
#   else
      int GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 4;
#   else
      int GC_n_kinds = 3;
#   endif
# endif


# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
        /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
        /* multiple of HBLKSIZE.                             */
# endif

/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.)
 * still need to be marked from.
 */

word GC_n_rescuing_pages;   /* Number of dirty pages we marked from; */
                            /* excludes ptrfree pages, etc.          */

mse * GC_mark_stack;

word GC_mark_stack_size = 0;

mse * GC_mark_stack_top;

static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;
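
/*
 * A rough sketch of the state machine driven by GC_mark_some() below:
 * MS_NONE means no collection is in progress.  An incremental
 * collection starts in MS_PUSH_RESCUERS (marking from dirty blocks);
 * once the roots are pushed we move to MS_ROOTS_PUSHED, drain the
 * mark stack, and return to MS_NONE.  MS_INVALID (mark bits unusable,
 * e.g. after an abandoned collection or a mark stack overflow)
 * instead goes through MS_PUSH_UNCOLLECTABLE and/or
 * MS_PARTIALLY_INVALID, rescanning the heap for marked objects
 * before the roots are pushed.
 */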

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;  /* Are there collectable marked */
                                        /* objects in the heap?         */

/* Is a collection in progress?  Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the     */
/* mark state is now MS_INVALID.                                        */
GC_bool GC_collection_in_progress()
{
    return(GC_mark_state != MS_NONE);
}

/* Clear all mark bits in the header. */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
    BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
}

/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
register hdr * hhdr;
{
    register int i;

    for (i = 0; i < MARK_BITS_SZ; ++i) {
        hhdr -> hb_marks[i] = ONES;
    }
}

/*
 * Clear all mark bits associated with block h.
 */
/*ARGSUSED*/
static void clear_marks_for_block(h, dummy)
struct hblk *h;
word dummy;
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bits for these objects are cleared only when the    */
        /* object is explicitly deallocated.  That either frees the */
        /* whole block, or clears the bit as the object goes on the */
        /* free list.                                                */
    GC_clear_hdr_marks(hhdr);
}

/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    set_mark_bit_from_hdr(hhdr, word_no);
}

void GC_clear_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    clear_mark_bit_from_hdr(hhdr, word_no);
}

GC_bool GC_is_marked(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    return(mark_bit_from_hdr(hhdr, word_no));
}


/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;
#   ifdef GATHERSTATS
        /* Counters reflect currently marked objects: reset here */
        GC_composite_in_use = 0;
        GC_atomic_in_use = 0;
#   endif
}

/* Initiate a garbage collection.  Initiates a full collection if the */
/* mark state is invalid.                                             */
/*ARGSUSED*/
void GC_initiate_gc()
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
        GC_read_changed();
#   endif
#   ifdef CHECKSUMS
        {
            extern void GC_check_dirty();

            if (GC_dirty_maintained) GC_check_dirty();
        }
#   endif
#   ifdef GATHERSTATS
        GC_n_rescuing_pages = 0;
#   endif
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark */
      /* bits are invalid.                               */
    scan_ptr = 0;
}


static void alloc_mark_stack();

/* Perform a small amount of marking.                 */
/* We try to touch roughly a page of memory.          */
/* Return TRUE if we just finished a mark phase.      */
/* Cold_gc_frame is an address inside a GC frame that */
/* remains valid until all marking is complete.       */
/* A zero value indicates that it's OK to miss some   */
/* register values.                                   */
GC_bool GC_mark_some(cold_gc_frame)
ptr_t cold_gc_frame;
{
    switch(GC_mark_state) {
        case MS_NONE:
            return(FALSE);

        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                if (scan_ptr == 0) {
#                   ifdef PRINTSTATS
                        GC_printf1("Marked from %lu dirty pages\n",
                                   (unsigned long)GC_n_rescuing_pages);
#                   endif
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);

        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                if (scan_ptr == 0) {
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);

        case MS_ROOTS_PUSHED:
            if (GC_mark_stack_top >= GC_mark_stack) {
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                return(TRUE);
            }

        case MS_INVALID:
        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
                return(FALSE);
            }
            if (GC_mark_stack_top >= GC_mark_stack) {
                GC_mark_from_mark_stack();
                return(FALSE);
            }
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate.        */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                GC_mark_state = MS_PARTIALLY_INVALID;
            }
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;
                }
            }
            return(FALSE);

        default:
            ABORT("GC_mark_some: bad state");
            return(FALSE);
    }
}


GC_bool GC_mark_stack_empty()
{
    return(GC_mark_stack_top < GC_mark_stack);
}

#ifdef PROF_MARKER
    word GC_prof_array[10];
#   define PROF(n) GC_prof_array[n]++
#else
#   define PROF(n)
#endif

/* Given a pointer to someplace other than a small object page or the */
/* first page of a large object, return a pointer either to the start */
/* of the large object or NIL.  In the latter case, black-list the    */
/* address current.  Returns NIL without black-listing if current     */
/* points to a block with IGNORE_OFF_PAGE set.                        */
/*ARGSUSED*/
# ifdef PRINT_BLACK_LIST
  word GC_find_start(current, hhdr, source)
  word source;
# else
  word GC_find_start(current, hhdr)
# define source 0
# endif
register word current;
register hdr * hhdr;
{
#   ifdef ALL_INTERIOR_POINTERS
        if (hhdr != 0) {
            register word orig = current;

            current = (word)HBLKPTR(current) + HDR_BYTES;
            do {
                current = current - HBLKSIZE*(word)hhdr;
                hhdr = HDR(current);
            } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
            /* current points to the start of the large object */
            if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(0);
            if ((word *)orig - (word *)current
                >= (ptrdiff_t)(hhdr->hb_sz)) {
                /* Pointer past the end of the block */
                GC_ADD_TO_BLACK_LIST_NORMAL(orig, source);
                return(0);
            }
            return(current);
        } else {
            GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
            return(0);
        }
#   else
        GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
        return(0);
#   endif
#   undef source
}

void GC_invalidate_mark_state()
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;
}

mse * GC_signal_mark_stack_overflow(msp)
mse * msp;
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
#   ifdef PRINTSTATS
        GC_printf1("Mark stack overflow; current size = %lu entries\n",
                   GC_mark_stack_size);
#   endif
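    /*
     * The caller resumes pushing at the returned position, so the
     * topmost entries are discarded.  Nothing is lost: MS_INVALID
     * forces the heap to be rescanned for marked objects later.
     */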
    return(msp-INITIAL_MARK_STACK_SIZE/8);
}


/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 */
void GC_mark_from_mark_stack()
{
  mse * GC_mark_stack_reg = GC_mark_stack;
  mse * GC_mark_stack_top_reg = GC_mark_stack_top;
  mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
  int credit = HBLKSIZE;      /* Remaining credit for marking work  */
  register word * current_p;  /* Pointer to current candidate ptr.  */
  register word current;      /* Candidate pointer.                 */
  register word * limit;      /* (Incl) limit of current candidate  */
                              /* range.                             */
  register word descr;
  register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  register ptr_t least_ha = GC_least_plausible_heap_addr;
# define SPLIT_RANGE_WORDS 128  /* Must be power of 2. */

  GC_objects_are_marked = TRUE;
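  /*
   * Loop while the mark stack is nonempty and credit remains.  The
   * tweaked condition below ORs the byte offset of the top entry from
   * the base with credit: the result is nonnegative exactly when both
   * are, apparently saving a test on most compilers.
   */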
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit >= 0) {
# else
  while ((((ptr_t)GC_mark_stack_top_reg - (ptr_t)GC_mark_stack_reg) | credit)
         >= 0) {
# endif
    current_p = GC_mark_stack_top_reg -> mse_start;
  retry:
    descr = GC_mark_stack_top_reg -> mse_descr;
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | DS_TAGS)) {
      word tag = descr & DS_TAGS;

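      /*
       * The low bits of a descriptor (DS_TAGS) select how the rest is
       * interpreted, as the switch below shows: DS_LENGTH - the
       * descriptor is the length of the range in bytes; DS_BITMAP -
       * the remaining bits form a bitmap, one bit per word, most
       * significant bit first; DS_PROC - the descriptor encodes a
       * mark procedure (PROC) and its environment (ENV);
       * DS_PER_OBJECT - the real descriptor is stored in the object
       * itself, at offset descr - tag.
       */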
      switch(tag) {
        case DS_LENGTH:
          /* Large length.                                              */
          /* Process part of the range to avoid pushing too much on the */
          /* stack.                                                     */
          GC_mark_stack_top_reg -> mse_start =
              limit = current_p + SPLIT_RANGE_WORDS-1;
          GC_mark_stack_top_reg -> mse_descr -=
              WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
          /* Make sure that pointers overlapping the two ranges are */
          /* considered.                                             */
          limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
          break;
        case DS_BITMAP:
          GC_mark_stack_top_reg--;
          descr &= ~DS_TAGS;
          credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
          while (descr != 0) {
            if ((signed_word)descr < 0) {
              current = *current_p;
              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
                PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit,
                              current_p, exit1);
              }
            }
            descr <<= 1;
            ++ current_p;
          }
          continue;
        case DS_PROC:
          GC_mark_stack_top_reg--;
          credit -= PROC_BYTES;
          GC_mark_stack_top_reg =
              (*PROC(descr))(current_p, GC_mark_stack_top_reg,
                             mark_stack_limit, ENV(descr));
          continue;
        case DS_PER_OBJECT:
          GC_mark_stack_top_reg -> mse_descr =
              *(word *)((ptr_t)current_p + descr - tag);
          goto retry;
      }
    } else {
      GC_mark_stack_top_reg--;
      limit = (word *)(((ptr_t)current_p) + (word)descr);
    }
    /* The simple case in which we're scanning a range. */
    credit -= (ptr_t)limit - (ptr_t)current_p;
    limit -= 1;
    while (current_p <= limit) {
      current = *current_p;
      if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
        PUSH_CONTENTS(current, GC_mark_stack_top_reg,
                      mark_stack_limit, current_p, exit2);
      }
      current_p = (word *)((char *)current_p + ALIGNMENT);
    }
  }
  GC_mark_stack_top = GC_mark_stack_top_reg;
}

/* Allocate or reallocate space for a mark stack of n entries. */
/* May silently fail.                                          */
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
            word displ = (word)GC_mark_stack & (GC_page_size - 1);
            signed_word size = GC_mark_stack_size * sizeof(struct ms_entry);

            /* Recycle old space */
            if (0 != displ) displ = GC_page_size - displ;
            size = (size - displ) & ~(GC_page_size - 1);
            if (size > 0) {
                GC_add_to_heap((struct hblk *)
                               ((word)GC_mark_stack + displ), (word)size);
            }
            GC_mark_stack = new_stack;
            GC_mark_stack_size = n;
#           ifdef PRINTSTATS
                GC_printf1("Grew mark stack to %lu frames\n",
                           (unsigned long) GC_mark_stack_size);
#           endif
        } else {
#           ifdef PRINTSTATS
                GC_printf1("Failed to grow mark stack to %lu frames\n",
                           (unsigned long) n);
#           endif
        }
    } else {
        if (new_stack == 0) {
            GC_err_printf0("No space for mark stack\n");
            EXIT();
        }
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
    }
    GC_mark_stack_top = GC_mark_stack-1;
}

void GC_mark_init()
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}

/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked.  t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(bottom, top)
ptr_t bottom;
ptr_t top;
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if DS_TAGS > ALIGNMENT - 1
        length += DS_TAGS;
        length &= ~DS_TAGS;
#   endif
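    /*
     * Since DS_LENGTH is the zero tag, a length whose low DS_TAGS bits
     * are clear (which the rounding above guarantees) is itself a
     * valid mark descriptor for the range.
     */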
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
}

/*
 * Analogous to the above, but push only those pages that may have been
 * dirtied.  A block h is assumed dirty if dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_dirty(bottom, top, dirty_fn, push_fn)
ptr_t bottom;
ptr_t top;
int (*dirty_fn)(/* struct hblk * h */);
void (*push_fn)(/* ptr_t bottom, ptr_t top */);
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));

    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
        }
        return;
    }
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }
    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow */
                (*push_fn)((ptr_t)h, top);
                return;
            } else {
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
            }
        }
        h++;
    }
    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
        }
    }
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
    }
}

# ifndef SMALL_CONFIG
void GC_push_conditional(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    if (all) {
        if (GC_dirty_maintained) {
#           ifdef PROC_VDB
                /* Pages that were never dirtied cannot contain pointers */
                GC_push_dirty(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#           else
                GC_push_all(bottom, top);
#           endif
        } else {
            GC_push_all(bottom, top);
        }
    } else {
        GC_push_dirty(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
#endif

# ifdef MSWIN32
  void __cdecl GC_push_one(p)
# else
  void GC_push_one(p)
# endif
word p;
{
    GC_PUSH_ONE_STACK(p, 0);
}

# ifdef __STDC__
#   define BASE(p) (word)GC_base((void *)(p))
# else
#   define BASE(p) (word)GC_base((char *)(p))
# endif

/* As above, but argument passed preliminary test. */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
  void GC_push_one_checked(p, interior_ptrs, source)
  ptr_t source;
# else
  void GC_push_one_checked(p, interior_ptrs)
# define source 0
# endif
register word p;
register GC_bool interior_ptrs;
{
    register word r;
    register hdr * hhdr;
    register int displ;

    GET_HDR(p, hhdr);
    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        if (hhdr != 0 && interior_ptrs) {
            r = BASE(p);
            hhdr = HDR(r);
            displ = BYTES_TO_WORDS(HBLKDISPL(r));
        } else {
            hhdr = 0;
        }
    } else {
        register map_entry_type map_entry;

        displ = HBLKDISPL(p);
        map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
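        /*
         * map_entry is the displacement in words from the start of
         * the enclosing object to this word, or OBJ_INVALID if p
         * points into no valid object; subtracting it below recovers
         * the object's base address.
         */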
        if (map_entry == OBJ_INVALID) {
#           ifndef ALL_INTERIOR_POINTERS
                if (interior_ptrs) {
                    r = BASE(p);
                    displ = BYTES_TO_WORDS(HBLKDISPL(r));
                    if (r == 0) hhdr = 0;
                } else {
                    hhdr = 0;
                }
#           else
                /* map already reflects interior pointers */
                hhdr = 0;
#           endif
        } else {
            displ = BYTES_TO_WORDS(displ);
            displ -= map_entry;
            r = (word)((word *)(HBLKPTR(p)) + displ);
        }
    }
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.                 */
    if (hhdr == 0) {
        if (interior_ptrs) {
#           ifdef PRINT_BLACK_LIST
                GC_add_to_black_list_stack(p, source);
#           else
                GC_add_to_black_list_stack(p);
#           endif
        } else {
            GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
#           undef source  /* In case we had to define it. */
        }
    } else {
        if (!mark_bit_from_hdr(hhdr, displ)) {
            set_mark_bit_from_hdr(hhdr, displ);
            GC_STORE_BACK_PTR(source, (ptr_t)r);
            PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
                     &(GC_mark_stack[GC_mark_stack_size]));
        }
    }
}

# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word words_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}

void GC_print_trace(word gc_no, GC_bool lock)
{
    int i;
    struct trace_entry *p;

    if (lock) LOCK();
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
        if (i < 0) i = TRACE_ENTRIES-1;
        p = GC_trace_buf + i;
        if (p -> gc_no < gc_no || p -> kind == 0) return;
        printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
               p -> kind, p -> gc_no, p -> words_allocd,
               (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    }
    printf("Trace incomplete\n");
    if (lock) UNLOCK();
}

# endif /* TRACE_BUF */

/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(bottom, top)
ptr_t bottom;
ptr_t top;
{
    word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha
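    /*
     * The defines above shadow the globals, so that the bounds tests
     * expanded from GC_PUSH_ONE_STACK below use the register-cached
     * copies instead of rereading the globals on each iteration.
     */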

    if (top == 0) return;
    /* Check all pointers in range and push them if they appear */
    /* to be valid.                                             */
    lim = t - 1 /* longword */;
    for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
        q = *p;
        GC_PUSH_ONE_STACK(q, p);
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

#ifndef THREADS
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 */
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t bottom;
ptr_t top;
ptr_t cold_gc_frame;
{
# ifdef ALL_INTERIOR_POINTERS
#   define EAGER_BYTES 1024
    /* Push the hot end of the stack eagerly, so that register values */
    /* saved inside GC frames are marked before they disappear.       */
    /* The rest of the marking can be deferred until later.           */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
#   ifdef STACK_GROWS_DOWN
        GC_push_all_eager(bottom, cold_gc_frame);
        GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
#   else /* STACK_GROWS_UP */
        GC_push_all_eager(cold_gc_frame, top);
        GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
#   endif /* STACK_GROWS_UP */
# else
    GC_push_all_eager(bottom, top);
# endif
# ifdef TRACE_BUF
    GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
#endif /* !THREADS */

void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
# ifdef ALL_INTERIOR_POINTERS
    GC_push_all(bottom, top);
# else
    GC_push_all_eager(bottom, top);
# endif
}

#ifndef SMALL_CONFIG
/* Push all objects reachable from marked objects in the given block */
/* containing objects of size 1.                                     */
void GC_push_marked1(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

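    /*
     * Each bit of a mark word corresponds to one word of the block.
     * Since every object here is a single word, each set bit marks
     * exactly one object, so a whole mark word can be scanned at a
     * time.
     */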
    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
            if (mark_word & 1) {
                q = p[i];
                GC_PUSH_ONE_HEAP(q, p + i);
            }
            i++;
            mark_word >>= 1;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}


#ifndef UNALIGNED

/* Push all objects reachable from marked objects in the given block */
/* containing objects of size 2.                                     */
void GC_push_marked2(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
            if (mark_word & 1) {
                q = p[i];
                GC_PUSH_ONE_HEAP(q, p + i);
                q = p[i+1];
                GC_PUSH_ONE_HEAP(q, p + i + 1);
            }
            i += 2;
            mark_word >>= 2;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

/* Push all objects reachable from marked objects in the given block */
/* containing objects of size 4.                                     */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
            if (mark_word & 1) {
                q = p[i];
                GC_PUSH_ONE_HEAP(q, p + i);
                q = p[i+1];
                GC_PUSH_ONE_HEAP(q, p + i + 1);
                q = p[i+2];
                GC_PUSH_ONE_HEAP(q, p + i + 2);
                q = p[i+3];
                GC_PUSH_ONE_HEAP(q, p + i + 3);
            }
            i += 4;
            mark_word >>= 4;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

#endif /* UNALIGNED */

#endif /* SMALL_CONFIG */

/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;
    register word * p;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);

    /* Some quick shortcuts: */
    {
        struct obj_kind *ok = &(GC_obj_kinds[hhdr -> hb_obj_kind]);
        if ((0 | DS_LENGTH) == ok -> ok_descriptor
            && FALSE == ok -> ok_relocate_descr)
            return;
    }
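    /*
     * A (0 | DS_LENGTH) descriptor that is never relocated (i.e. the
     * object length is never added to it) describes a length of zero:
     * objects of this kind are pointer-free, so there is nothing to
     * mark from.
     */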
    if (GC_block_empty(hhdr)/* nothing marked */) return;
#   ifdef GATHERSTATS
        GC_n_rescuing_pages++;
#   endif
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJSZ) {
        lim = (word *)(h + 1);
    } else {
        lim = (word *)(h + 1) - sz;
    }

    switch(sz) {
#   if !defined(SMALL_CONFIG)
      case 1:
        GC_push_marked1(h, hhdr);
        break;
#   endif
#   if !defined(SMALL_CONFIG) && !defined(UNALIGNED)
      case 2:
        GC_push_marked2(h, hhdr);
        break;
      case 4:
        GC_push_marked4(h, hhdr);
        break;
#   endif
      default:
        GC_mark_stack_top_reg = GC_mark_stack_top;
        for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
             p += sz, word_no += sz) {
            /* This ignores user specified mark procs.  This currently   */
            /* doesn't matter, since marking from the whole object       */
            /* is always sufficient, and we will eventually use the user */
            /* mark proc to avoid any bogus pointers.                    */
            if (mark_bit_from_hdr(hhdr, word_no)) {
                /* Mark from fields inside the object */
                PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg,
                         mark_stack_limit);
#               ifdef GATHERSTATS
                    /* Subtract this object from total, since it was */
                    /* added in twice.                               */
                    GC_composite_in_use -= sz;
#               endif
            }
        }
        GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}

#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;

    if (sz < MAXOBJSZ) {
        return(GC_page_was_dirty(h));
    } else {
        register ptr_t p = (ptr_t)h;
        sz += HDR_WORDS;
        sz = WORDS_TO_BYTES(sz);
        while (p < (ptr_t)h + sz) {
            if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
            p += HBLKSIZE;
        }
        return(FALSE);
    }
}
#endif /* SMALL_CONFIG */

/* Similar to GC_push_marked, but find the next used block at or after */
/* h, push its marked objects, and return the address of the block     */
/* following it.                                                       */
struct hblk * GC_push_next_marked(h)
struct hblk *h;
{
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    hhdr = HDR(h);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}

#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages */
struct hblk * GC_push_next_marked_dirty(h)
struct hblk *h;
{
    register hdr * hhdr = HDR(h);

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
#       ifdef STUBBORN_ALLOC
            if (hhdr -> hb_obj_kind == STUBBORN) {
                if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                    break;
                }
            } else {
                if (GC_block_was_dirty(h, hhdr)) break;
            }
#       else
            if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif

/* Similar to above, but for uncollectable pages.  Needed since we */
/* do not clear marks for such pages, even for full collections.   */
struct hblk * GC_push_next_marked_uncollectable(h)
struct hblk *h;
{
    register hdr * hhdr = HDR(h);

    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}