Annotation of OpenXM_contrib2/asir2000/gc5.3/blacklst.c, Revision 1.1.1.1
1.1 noro 1: /*
2: * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3: * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4: *
5: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
7: *
8: * Permission is hereby granted to use or copy this program
9: * for any purpose, provided the above notices are retained on all copies.
10: * Permission to modify the code and to distribute modified code is granted,
11: * provided the above notices are retained, and a notice that the code was
12: * modified is included with the above copyright notice.
13: */
14: /* Boehm, August 9, 1995 6:09 pm PDT */
15: # include "gc_priv.h"
16:
17: /*
18: * We maintain several hash tables of hblks that have had false hits.
19: * Each contains one bit per hash bucket; If any page in the bucket
20: * has had a false hit, we assume that all of them have.
21: * See the definition of page_hash_table in gc_private.h.
22: * False hits from the stack(s) are much more dangerous than false hits
23: * from elsewhere, since the former can pin a large object that spans the
24: * block, even though it does not start on the dangerous block.
25: */
26:
27: /*
28: * Externally callable routines are:
29:
30: * GC_add_to_black_list_normal
31: * GC_add_to_black_list_stack
32: * GC_promote_black_lists
33: * GC_is_black_listed
34: *
35: * All require that the allocator lock is held.
36: */
37:
38: /* Pointers to individual tables. We replace one table by another by */
39: /* switching these pointers. */
40: word * GC_old_normal_bl;
41: /* Nonstack false references seen at last full */
42: /* collection. */
43: word * GC_incomplete_normal_bl;
44: /* Nonstack false references seen since last */
45: /* full collection. */
46: word * GC_old_stack_bl;
47: word * GC_incomplete_stack_bl;
48:
49: word GC_total_stack_black_listed;
50:
51: word GC_black_list_spacing = MINHINCR*HBLKSIZE; /* Initial rough guess */
52:
53: void GC_clear_bl();
54:
55: void GC_default_print_heap_obj_proc(p)
56: ptr_t p;
57: {
58: ptr_t base = GC_base(p);
59:
60: GC_err_printf2("start: 0x%lx, appr. length: %ld", base, GC_size(base));
61: }
62:
63: void (*GC_print_heap_obj)(/* char * s, ptr_t p */) =
64: GC_default_print_heap_obj_proc;
65:
66: void GC_print_source_ptr(p)
67: ptr_t p;
68: {
69: ptr_t base = GC_base(p);
70: if (0 == base) {
71: if (0 == p) {
72: GC_err_printf0("in register");
73: } else {
74: GC_err_printf0("in root set");
75: }
76: } else {
77: GC_err_printf0("in object at ");
78: (*GC_print_heap_obj)(base);
79: }
80: }
81:
82: void GC_bl_init()
83: {
84: # ifndef ALL_INTERIOR_POINTERS
85: GC_old_normal_bl = (word *)
86: GC_scratch_alloc((word)(sizeof (page_hash_table)));
87: GC_incomplete_normal_bl = (word *)GC_scratch_alloc
88: ((word)(sizeof(page_hash_table)));
89: if (GC_old_normal_bl == 0 || GC_incomplete_normal_bl == 0) {
90: GC_err_printf0("Insufficient memory for black list\n");
91: EXIT();
92: }
93: GC_clear_bl(GC_old_normal_bl);
94: GC_clear_bl(GC_incomplete_normal_bl);
95: # endif
96: GC_old_stack_bl = (word *)GC_scratch_alloc((word)(sizeof(page_hash_table)));
97: GC_incomplete_stack_bl = (word *)GC_scratch_alloc
98: ((word)(sizeof(page_hash_table)));
99: if (GC_old_stack_bl == 0 || GC_incomplete_stack_bl == 0) {
100: GC_err_printf0("Insufficient memory for black list\n");
101: EXIT();
102: }
103: GC_clear_bl(GC_old_stack_bl);
104: GC_clear_bl(GC_incomplete_stack_bl);
105: }
106:
107: void GC_clear_bl(doomed)
108: word *doomed;
109: {
110: BZERO(doomed, sizeof(page_hash_table));
111: }
112:
113: void GC_copy_bl(old, new)
114: word *new, *old;
115: {
116: BCOPY(old, new, sizeof(page_hash_table));
117: }
118:
119: static word total_stack_black_listed();
120:
121: /* Signal the completion of a collection. Turn the incomplete black */
122: /* lists into new black lists, etc. */
123: void GC_promote_black_lists()
124: {
125: word * very_old_normal_bl = GC_old_normal_bl;
126: word * very_old_stack_bl = GC_old_stack_bl;
127:
128: GC_old_normal_bl = GC_incomplete_normal_bl;
129: GC_old_stack_bl = GC_incomplete_stack_bl;
130: # ifndef ALL_INTERIOR_POINTERS
131: GC_clear_bl(very_old_normal_bl);
132: # endif
133: GC_clear_bl(very_old_stack_bl);
134: GC_incomplete_normal_bl = very_old_normal_bl;
135: GC_incomplete_stack_bl = very_old_stack_bl;
136: GC_total_stack_black_listed = total_stack_black_listed();
137: # ifdef PRINTSTATS
138: GC_printf1("%ld bytes in heap blacklisted for interior pointers\n",
139: (unsigned long)GC_total_stack_black_listed);
140: # endif
141: if (GC_total_stack_black_listed != 0) {
142: GC_black_list_spacing =
143: HBLKSIZE*(GC_heapsize/GC_total_stack_black_listed);
144: }
145: if (GC_black_list_spacing < 3 * HBLKSIZE) {
146: GC_black_list_spacing = 3 * HBLKSIZE;
147: }
148: if (GC_black_list_spacing > MAXHINCR * HBLKSIZE) {
149: GC_black_list_spacing = MAXHINCR * HBLKSIZE;
150: /* Makes it easier to allocate really huge blocks, which otherwise */
151: /* may have problems with nonuniform blacklist distributions. */
152: /* This way we should always succeed immediately after growing the */
153: /* heap. */
154: }
155: }
156:
157: void GC_unpromote_black_lists()
158: {
159: # ifndef ALL_INTERIOR_POINTERS
160: GC_copy_bl(GC_old_normal_bl, GC_incomplete_normal_bl);
161: # endif
162: GC_copy_bl(GC_old_stack_bl, GC_incomplete_stack_bl);
163: }
164:
165: # ifndef ALL_INTERIOR_POINTERS
166: /* P is not a valid pointer reference, but it falls inside */
167: /* the plausible heap bounds. */
168: /* Add it to the normal incomplete black list if appropriate. */
169: #ifdef PRINT_BLACK_LIST
170: void GC_add_to_black_list_normal(p, source)
171: ptr_t source;
172: #else
173: void GC_add_to_black_list_normal(p)
174: #endif
175: word p;
176: {
177: if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
178: {
179: register int index = PHT_HASH(p);
180:
181: if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
182: # ifdef PRINT_BLACK_LIST
183: if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
184: GC_err_printf2(
185: "Black listing (normal) 0x%lx referenced from 0x%lx ",
186: (unsigned long) p, (unsigned long) source);
187: GC_print_source_ptr(source);
188: GC_err_puts("\n");
189: }
190: # endif
191: set_pht_entry_from_index(GC_incomplete_normal_bl, index);
192: } /* else this is probably just an interior pointer to an allocated */
193: /* object, and isn't worth black listing. */
194: }
195: }
196: # endif
197:
198: /* And the same for false pointers from the stack. */
199: #ifdef PRINT_BLACK_LIST
200: void GC_add_to_black_list_stack(p, source)
201: ptr_t source;
202: #else
203: void GC_add_to_black_list_stack(p)
204: #endif
205: word p;
206: {
207: register int index = PHT_HASH(p);
208:
209: if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
210: # ifdef PRINT_BLACK_LIST
211: if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
212: GC_err_printf2(
213: "Black listing (stack) 0x%lx referenced from 0x%lx ",
214: (unsigned long)p, (unsigned long)source);
215: GC_print_source_ptr(source);
216: GC_err_puts("\n");
217: }
218: # endif
219: set_pht_entry_from_index(GC_incomplete_stack_bl, index);
220: }
221: }
222:
223: /*
224: * Is the block starting at h of size len bytes black listed? If so,
225: * return the address of the next plausible r such that (r, len) might not
226: * be black listed. (R may not actually be in the heap. We guarantee only
227: * that every smaller value of r after h is also black listed.)
228: * If (h,len) is not black listed, return 0.
229: * Knows about the structure of the black list hash tables.
230: */
231: struct hblk * GC_is_black_listed(h, len)
232: struct hblk * h;
233: word len;
234: {
235: register int index = PHT_HASH((word)h);
236: register word i;
237: word nblocks = divHBLKSZ(len);
238:
239: # ifndef ALL_INTERIOR_POINTERS
240: if (get_pht_entry_from_index(GC_old_normal_bl, index)
241: || get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
242: return(h+1);
243: }
244: # endif
245:
246: for (i = 0; ; ) {
247: if (GC_old_stack_bl[divWORDSZ(index)] == 0
248: && GC_incomplete_stack_bl[divWORDSZ(index)] == 0) {
249: /* An easy case */
250: i += WORDSZ - modWORDSZ(index);
251: } else {
252: if (get_pht_entry_from_index(GC_old_stack_bl, index)
253: || get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
254: return(h+i+1);
255: }
256: i++;
257: }
258: if (i >= nblocks) break;
259: index = PHT_HASH((word)(h+i));
260: }
261: return(0);
262: }
263:
264:
265: /* Return the number of blacklisted blocks in a given range. */
266: /* Used only for statistical purposes. */
267: /* Looks only at the GC_incomplete_stack_bl. */
268: word GC_number_stack_black_listed(start, endp1)
269: struct hblk *start, *endp1;
270: {
271: register struct hblk * h;
272: word result = 0;
273:
274: for (h = start; h < endp1; h++) {
275: register int index = PHT_HASH((word)h);
276:
277: if (get_pht_entry_from_index(GC_old_stack_bl, index)) result++;
278: }
279: return(result);
280: }
281:
282:
283: /* Return the total number of (stack) black-listed bytes. */
284: static word total_stack_black_listed()
285: {
286: register unsigned i;
287: word total = 0;
288:
289: for (i = 0; i < GC_n_heap_sects; i++) {
290: struct hblk * start = (struct hblk *) GC_heap_sects[i].hs_start;
291: word len = (word) GC_heap_sects[i].hs_bytes;
292: struct hblk * endp1 = start + len/HBLKSIZE;
293:
294: total += GC_number_stack_black_listed(start, endp1);
295: }
296: return(total * HBLKSIZE);
297: }
298:
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>