/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#define DEBUG
#undef DEBUG
#include <stdio.h>
#include "gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */


# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black   */
        /* listed block.  Must be >= HBLKSIZE.                  */


# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list.  */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a     */
        /* single free list.                                           */
# define FL_COMPRESSION 8
        /* In-between sizes map this many distinct sizes to a single   */
        /* bin.                                                        */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD
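        /* With the defaults above, N_HBLK_FLS == (256-32)/8 + 32 == 60.  */
        /* Indices 1..32 serve the unique sizes (sizes 33..39 also fall   */
        /* into bin 32), in-between sizes share bins up through 59, and   */
        /* every huge size lands on the single list at index 60.          */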

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;
}
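/* A few sample mappings, assuming the default thresholds:      */
/*   blocks_needed ==   1  ->  index  1  (its own list)         */
/*   blocks_needed ==  40  ->  index 33  ((40-32)/8 + 32)       */
/*   blocks_needed == 300  ->  index 60  (the shared huge list) */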

# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

71: # if !defined(NO_DEBUGGING)
72: void GC_print_hblkfreelist()
73: {
1.1.1.2 maekawa 74: struct hblk * h;
1.1 maekawa 75: word total_free = 0;
1.1.1.2 maekawa 76: hdr * hhdr;
1.1 maekawa 77: word sz;
1.1.1.2 maekawa 78: int i;
1.1 maekawa 79:
1.1.1.2 maekawa 80: for (i = 0; i <= N_HBLK_FLS; ++i) {
81: h = GC_hblkfreelist[i];
82: if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
83: while (h != 0) {
84: hhdr = HDR(h);
1.1 maekawa 85: sz = hhdr -> hb_sz;
1.1.1.2 maekawa 86: GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
1.1 maekawa 87: total_free += sz;
88: if (GC_is_black_listed(h, HBLKSIZE) != 0) {
89: GC_printf0("start black listed\n");
90: } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
91: GC_printf0("partially black listed\n");
92: } else {
93: GC_printf0("not black listed\n");
94: }
95: h = hhdr -> hb_next;
1.1.1.2 maekawa 96: }
97: }
98: if (total_free != GC_large_free_bytes) {
99: GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
100: (unsigned long) GC_large_free_bytes);
1.1 maekawa 101: }
102: GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
103: }

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
        h = GC_hblkfreelist[i];
        while (h != 0) {
            hhdr = HDR(h);
            if (hhdr == wanted) return i;
            h = hhdr -> hb_next;
        }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf2("***Section from 0x%lx to 0x%lx\n",
                   (unsigned long)start, (unsigned long)end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf0("Missing header!!\n");
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               (unsigned long)correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               (unsigned long)actual_index,
                               (unsigned long)correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and        */
/* kind of objects.                                                    */
/* Return FALSE on failure.                                            */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers. */
    if (!GC_add_map_entry(sz)) return(FALSE);
    hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields. */
    hhdr -> hb_sz = sz;
    hhdr -> hb_obj_kind = kind;
    hhdr -> hb_flags = flags;
    descr = GC_obj_kinds[kind].ok_descriptor;
    if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
    hhdr -> hb_descr = descr;

    /* Clear mark bits. */
    GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size-appropriate
 * free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
        int index;
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if (HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}
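
/* Note on the two lookups above: if h - 1 lies in the interior of a    */
/* multi-block chunk whose interior header slots hold forwarding        */
/* addresses, the loop walks back to the chunk's real header, and the   */
/* chunk is reported iff that header marks it free.  Interior blocks    */
/* of a free chunk may instead have nil header slots, so we fall back   */
/* to GC_prev_block and check that the candidate really ends at h.      */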

/*
 * Add hhdr to the appropriate free list.
 * The block is pushed on the head of its (doubly linked) list;
 * the lists are not maintained in address order.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
        GET_HDR(second, second_hdr);
        second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only  */
/* way blocks are ever unmapped.                                       */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if (last_rec > GC_gc_no
            || (last_rec < threshold && threshold < GC_gc_no
                                        /* not recently wrapped */)) {
            sz = hhdr -> hb_sz;
            GC_unmap((ptr_t)h, sz);
            hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
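
/* Worked example (assuming UNMAP_THRESHOLD == 6 and no recent 16-bit  */
/* wraparound): with GC_gc_no == 100 we get threshold == 94; a block   */
/* last reclaimed at collection 90 has sat idle for more than 6 GCs    */
/* and is unmapped, while one stamped 97 stays mapped.  The extra      */
/* comparisons against GC_gc_no guard against the unsigned short       */
/* stamp or the threshold having wrapped around.                       */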

/* Merge all unmapped blocks that are adjacent to other free           */
/* blocks.  This may involve remapping, since all blocks are either    */
/* fully mapped or fully unmapped.                                     */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible. */
        if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
                GC_ASSERT(!IS_MAPPED(nexthdr));
                /* Make both consistent, so that we can merge. */
                if (size > nextsize) {
                    GC_remap((ptr_t)next, nextsize);
                } else {
                    GC_unmap((ptr_t)h, size);
                    hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
                GC_ASSERT(!IS_MAPPED(hhdr));
                if (size > nextsize) {
                    GC_unmap((ptr_t)next, nextsize);
                } else {
                    GC_remap((ptr_t)h, size);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                }
            } else {
                /* Both are unmapped: unmap any gap in the middle, */
                /* then merge, leaving the result unmapped.        */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list. */
            h = GC_hblkfreelist[i];
        } else /* not mergable with successor */ {
            h = hhdr -> hb_next;
        }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist. */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (0 != prev) {
        HDR(prev) -> hb_next = n;
    } else {
        GC_hblkfreelist[index] = n;
    }
    if (0 != next) {
        HDR(next) -> hb_prev = n;
    }
#   ifdef GC_ASSERTIONS
      nhdr -> hb_map = 0;       /* Don't fail test for consecutive      */
                                /* free blocks in GC_add_to_fl.         */
#   endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}
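
/* Worked example (illustrative): if h heads an 8-block free chunk and */
/* n == h + 3, then afterwards h describes a 3-block free chunk that   */
/* has been moved to the 3-block list, while n inherits h's old links  */
/* on list `index' with hb_sz == 5 * HBLKSIZE, about to be handed out  */
/* by the caller.                                                      */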

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz words, searching the free lists that
 * could hold a block of the required size.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * We clear the block if it is destined for large objects, and if
 * kind requires that newly allocated objects be cleared.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned char flags;    /* IGNORE_OFF_PAGE or 0 */
{
    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
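
/* Illustrative caller (a sketch, not code from this file): the        */
/* collector's higher-level allocation code obtains large-object       */
/* memory roughly like this, collecting or expanding the heap and      */
/* retrying when 0 is returned.                                        */
#if 0
    struct hblk * h = GC_allochblk(sz, NORMAL, 0 /* flags */);
    if (0 == h) {
        /* ... collect or grow the heap, then retry ... */
    }
#endif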
/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;    /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* Search for a big enough block in free list. */
    hbp = GC_hblkfreelist[n];
    for (; 0 != hbp; hbp = hhdr -> hb_next) {
        GET_HDR(hbp, hhdr);
        size_avail = hhdr->hb_sz;
        if (size_avail < size_needed) continue;
        if (!GC_use_entire_heap) {
            if (size_avail != size_needed
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !GC_incremental && GC_should_collect()) {
                continue;
            }
        }
        /* If the next heap block is obviously better, go on.   */
        /* This prevents us from disassembling a single large   */
        /* block to get tiny blocks.                            */
        {
            signed_word next_size;

            thishbp = hhdr -> hb_next;
            if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
            }
        }
        if ( !IS_UNCOLLECTABLE(kind) &&
             (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
            struct hblk * lasthbp = hbp;
            ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
            signed_word orig_avail = size_avail;
            signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                            HBLKSIZE
                                            : size_needed);

            while ((ptr_t)lasthbp <= search_end
                   && (thishbp = GC_is_black_listed(lasthbp,
                                                    (word)eff_size_needed))) {
                lasthbp = thishbp;
            }
            size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
            thishbp = lasthbp;
            if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                    /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                          GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                          hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                    /* Split the block at thishbp. */
                    GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                    /* Advance to thishbp. */
                    hbp = thishbp;
                    hhdr = thishdr;
                    /* We must now allocate thishbp, since it may   */
                    /* be on the wrong free list.                   */
                }
            } else if (size_needed > (signed_word)BL_LIMIT
                       && orig_avail - size_needed
                          > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                WARN("Needed to allocate blacklisted block at 0x%lx\n",
                     (word)hbp);
                size_avail = orig_avail;
            } else if (size_avail == 0 && size_needed == HBLKSIZE
                       && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                    static unsigned count = 0;

                    /* The block is completely blacklisted.  We need    */
                    /* to drop some such blocks, since otherwise we     */
                    /* spend all our time traversing them if            */
                    /* pointer-free blocks are unpopular.               */
                    /* A dropped block will be reconsidered at next GC. */
                    if ((++count & 3) == 0) {
                        /* Allocate and drop the block in small chunks, */
                        /* to maximize the chance that we will recover  */
                        /* some later.                                  */
                        word total_size = hhdr -> hb_sz;
                        struct hblk * limit = hbp + divHBLKSZ(total_size);
                        struct hblk * h;
                        struct hblk * prev = hhdr -> hb_prev;

                        GC_words_wasted += total_size;
                        GC_large_free_bytes -= total_size;
                        GC_remove_from_fl(hhdr, n);
                        for (h = hbp; h < limit; h++) {
                            if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                                (void) setup_header(
                                          hhdr,
                                          BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
                                          PTRFREE, 0); /* Can't fail. */
                                if (GC_debugging_started) {
                                    BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                                }
                            }
                        }
                        /* Restore hbp to point at free block. */
                        hbp = prev;
                        if (0 == hbp) {
                            return GC_allochblk_nth(sz, kind, flags, n);
                        }
                        hhdr = HDR(hbp);
                    }
                }
            }
        }
        if ( size_avail >= size_needed ) {
#           ifdef USE_MUNMAP
              if (!IS_MAPPED(hhdr)) {
                  GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                  hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
#           endif
            /* hbp may be on the wrong freelist; the parameter n    */
            /* is important.                                        */
            hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
            break;
        }
    }

    if (0 == hbp) return 0;

    /* Notify virtual dirty bit implementation that we are about to write. */
    GC_write_hint(hbp);

    /* Add it to map of valid blocks. */
    if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header. */
    if (!setup_header(hhdr, sz, kind, flags)) {
        GC_remove_counts(hbp, (word)size_needed);
        return(0); /* ditto */
    }

    /* Clear block if necessary. */
    if (GC_debugging_started
        || (sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init)) {
        BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
    }

    /* We just successfully allocated a block.  Restart count of    */
    /* consecutive failures.                                        */
    {
        extern unsigned GC_fail_count;

        GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case. */
    if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
    }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible. */
    if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
    }
    /* Coalesce with predecessor, if possible. */
    if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
            GC_remove_from_fl(prevhdr, FL_UNKNOWN);
            prevhdr -> hb_sz += hhdr -> hb_sz;
            GC_remove_header(hbp);
            hbp = prev;
            hhdr = prevhdr;
        }
    }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}