Annotation of OpenXM_contrib2/asir2000/gc/allchblk.c, Revision 1.4
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */


# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black   */
                /* listed block.  Must be >= HBLKSIZE.                  */


# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list    */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a      */
        /* single free list.                                            */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single    */
        /* bin.                                                         */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD
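
/*
 * A worked example of the binning arithmetic above, using the default
 * thresholds defined in this file: with UNIQUE_THRESHOLD = 32,
 * HUGE_THRESHOLD = 256 and FL_COMPRESSION = 8,
 *      N_HBLK_FLS = (256 - 32)/8 + 32 = 28 + 32 = 60,
 * so GC_hblkfreelist below has N_HBLK_FLS+1 = 61 entries (indices 0..60).
 * Requests of up to 32 blocks each get their own list, sizes of 33..255
 * blocks share lists in groups of FL_COMPRESSION, and 256 blocks or more
 * all map to list N_HBLK_FLS.
 */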

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP
  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list. */

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS      */
  /* > GC_max_large_allocd_bytes?                                      */
  GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;
}
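
/*
 * Worked examples, derived directly from the mapping above (the numbers
 * assume the default thresholds defined earlier in this file):
 *   GC_hblk_fl_from_blocks(1)   == 1   (own list)
 *   GC_hblk_fl_from_blocks(32)  == 32  (last size with its own list)
 *   GC_hblk_fl_from_blocks(33)  == 32  (sizes 33..39 share list 32)
 *   GC_hblk_fl_from_blocks(40)  == 33
 *   GC_hblk_fl_from_blocks(256) == N_HBLK_FLS (60), as do all larger sizes.
 * Because bins are coarse, a list can contain blocks smaller than a given
 * request; callers still compare against hb_sz before committing.
 */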

# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        /* GC_free_bytes is not maintained with USE_MUNMAP, so  */
        /* only the list number is printed.                     */
        if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
#     else
        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                               (unsigned long)i,
                               (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf0("partially black listed\n");
        } else {
             GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
          while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
          }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf0("Missing header!!\n");
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0(" (unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                                correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and        */
/* kind of objects.                                                    */
/* Return FALSE on failure.                                            */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts. */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          /* With USE_MUNMAP no free counts are kept, so index is      */
          /* only needed here, where the list head must be updated.    */
          if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
            index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    FREE_ASSERT(GC_free_bytes[index] >= 0);
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
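
/*
 * Illustrative sketch (not part of the collector): unlinking B from a
 * doubly linked free list A <-> B <-> C, as the code above does.
 * Before:  A.hb_next == B, B.hb_prev == A, B.hb_next == C, C.hb_prev == B.
 * After GC_remove_from_fl(HDR(B), n):  A.hb_next == C, C.hb_prev == A.
 * If B is instead the list head (hb_prev == 0), GC_hblkfreelist[index]
 * is advanced to C, and C's hb_prev becomes 0.
 */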

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if(HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}

/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only  */
/* way blocks are ever unmapped.                                       */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if (last_rec > GC_gc_no
            || (last_rec < threshold && threshold < GC_gc_no
                /* not recently wrapped */)) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
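
/*
 * A worked example of the age test above (illustration only, using the
 * UNMAP_THRESHOLD of 6 defined in GC_unmap_old): with GC_gc_no == 100,
 * threshold == 94.  A block with hb_last_reclaimed == 90 satisfies
 * last_rec < threshold && threshold < GC_gc_no, so it is unmapped; one
 * with hb_last_reclaimed == 97 is only 3 collections old and stays
 * mapped.  The remaining clauses are conservative guards against the
 * 16-bit hb_last_reclaimed field wrapping relative to GC_gc_no.
 */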

/* Merge all unmapped blocks that are adjacent to other free           */
/* blocks.  This may involve remapping, since all blocks are either    */
/* fully mapped or fully unmapped.                                     */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
        hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */
                                /* free blocks in GC_add_to_fl.         */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collector, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) {
            return result;
        }
    }
    return 0;
}
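
/*
 * Illustrative call (a sketch, not code from this file's callers): a
 * routine building a free list of 8-word NORMAL objects might do
 *
 *      struct hblk * h = GC_allochblk((word)8, NORMAL, 0);
 *      if (0 == h) ... handle allocation failure ...
 *
 * after which it builds the object free list inside *h itself;
 * GC_allochblk has already installed the header and object map, per the
 * NOTE above.  NORMAL and the IGNORE_OFF_PAGE flag are the kind and flag
 * constants used elsewhere in this collector; the failure handling shown
 * is hypothetical.
 */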
/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for(; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (!GC_use_entire_heap
                && size_avail != size_needed
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !GC_incremental && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                    /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
                    /* heap if we allocate only small objects.          */
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                      continue;
                    }
#               endif /* !USE_MUNMAP */
            }
1.1 noro 601: /* If the next heap block is obviously better, go on. */
602: /* This prevents us from disassembling a single large block */
603: /* to get tiny blocks. */
604: {
605: signed_word next_size;
606:
607: thishbp = hhdr -> hb_next;
608: if (thishbp != 0) {
1.3 noro 609: GET_HDR(thishbp, thishdr);
1.1 noro 610: next_size = (signed_word)(thishdr -> hb_sz);
611: if (next_size < size_avail
612: && next_size >= size_needed
613: && !GC_is_black_listed(thishbp, (word)size_needed)) {
614: continue;
615: }
616: }
617: }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                 ifdef USE_MUNMAP
                    if (!IS_MAPPED(hhdr)) {
                      GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                      hhdr -> hb_flags &= ~WAS_UNMAPPED;
                    }
#                 endif
                  /* Split the block at thishbp */
                  GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                  hbp = thishbp;
                  hhdr = thishdr;
                  /* We must now allocate thishbp, since it may */
                  /* be on the wrong free list.                 */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                /* Warn unless the user suppressed this via the environment. */
                if (0 == GETENV("GC_NO_BLACKLIST_WARNING")) {
                  WARN("Needed to allocate blacklisted block at 0x%lx\n",
                       (word)hbp);
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointerfree        */
                  /* blocks are unpopular.                              */
                  /* A dropped block will be reconsidered at next GC.   */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to */
                    /* maximize the chance that we will recover some   */
                    /* later.                                          */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_words_wasted += total_size;
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(
                                    hhdr,
                                    BYTES_TO_WORDS(HBLKSIZE),
                                    PTRFREE, 0); /* Can't fail */
                          if (GC_debugging_started) {
                            BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if( size_avail >= size_needed ) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n    */
                /* is important.                                        */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Notify virtual dirty bit implementation that we are about to write. */
        GC_write_hint(hbp);

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
    {
        extern unsigned GC_fail_count;

        GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
struct hblk *next, *prev;
hdr *hhdr, *prevhdr, *nexthdr;
signed_word size;


    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}