Annotation of OpenXM_contrib2/asir2000/gc5.3/alloc.c, Revision 1.2
1.1 noro 1: /*
2: * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3: * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4: * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
5: * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
6: *
7: * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8: * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9: *
10: * Permission is hereby granted to use or copy this program
11: * for any purpose, provided the above notices are retained on all copies.
12: * Permission to modify the code and to distribute modified code is granted,
13: * provided the above notices are retained, and a notice that the code was
14: * modified is included with the above copyright notice.
15: *
16: */
17:
18:
19: # include "gc_priv.h"
20:
21: # include <stdio.h>
22: # ifndef MACOS
23: # include <signal.h>
24: # include <sys/types.h>
25: # endif
26:
27: void GC_timerstart(),GC_timerstop();
1.2 ! noro 28: void SendHeapSize(), int_handler(), ox_usr1_handler();
1.1 noro 29:
30: /*
31: * Separate free lists are maintained for different sized objects
32: * up to MAXOBJSZ.
33: * The call GC_allocobj(i,k) ensures that the freelist for
34: * kind k objects of size i points to a non-empty
35: * free list. It returns a pointer to the first entry on the free list.
36: * In a single-threaded world, GC_allocobj may be called to allocate
37: * an object of (small) size i as follows:
38: *
39: * opp = &(GC_objfreelist[i]);
40: * if (*opp == 0) GC_allocobj(i, NORMAL);
41: * ptr = *opp;
42: * *opp = obj_link(ptr);
43: *
44: * Note that this is very fast if the free list is non-empty; it should
45: * only involve the execution of 4 or 5 simple instructions.
46: * All composite objects on freelists are cleared, except for
47: * their first word.
48: */
49:
50: /*
51: * The allocator uses GC_allochblk to allocate large chunks of objects.
52: * These chunks all start on addresses which are multiples of
53: * HBLKSZ. Each allocated chunk has an associated header,
54: * which can be located quickly based on the address of the chunk.
55: * (See headers.c for details.)
56: * This makes it possible to check quickly whether an
57: * arbitrary address corresponds to an object administered by the
58: * allocator.
59: */
60:
61: word GC_non_gc_bytes = 0; /* Number of bytes not intended to be collected */
62:
63: word GC_gc_no = 0;
64:
65: #ifndef SMALL_CONFIG
66: int GC_incremental = 0; /* By default, stop the world. */
67: #endif
68:
69: int GC_full_freq = 19; /* Every 20th collection is a full */
70: /* collection, whether we need it */
71: /* or not. */
72:
73: GC_bool GC_need_full_gc = FALSE;
74: /* Need full GC due to heap growth. */
75:
76: word GC_used_heap_size_after_full = 0;
77:
78: char * GC_copyright[] =
79: {"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
80: "Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. ",
81: "Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved. ",
82: "THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
83: " EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.",
84: "See source code for details." };
85:
86: # include "version.h"
87:
88: /* some more variables */
89:
90: extern signed_word GC_mem_found; /* Number of reclaimed longwords */
91: /* after garbage collection */
92:
93: GC_bool GC_dont_expand = 0;
94:
95: word GC_free_space_numerator = 1;
96: word GC_free_space_divisor = 3;
97:
98: extern GC_bool GC_collection_in_progress();
99: /* Collection is in progress, or was abandoned. */
100:
101: int GC_never_stop_func GC_PROTO((void)) { return(0); }
102:
103: CLOCK_TYPE GC_start_time; /* Time at which we stopped world. */
104: /* used only in GC_timeout_stop_func. */
105:
106: int GC_n_attempts = 0; /* Number of attempts at finishing */
107: /* collection within TIME_LIMIT */
108:
109: #ifdef SMALL_CONFIG
110: # define GC_timeout_stop_func GC_never_stop_func
111: #else
112: int GC_timeout_stop_func GC_PROTO((void))
113: {
114: CLOCK_TYPE current_time;
115: static unsigned count = 0;
116: unsigned long time_diff;
117:
118: if ((count++ & 3) != 0) return(0);
119: GET_TIME(current_time);
120: time_diff = MS_TIME_DIFF(current_time,GC_start_time);
121: if (time_diff >= TIME_LIMIT) {
122: # ifdef PRINTSTATS
123: GC_printf0("Abandoning stopped marking after ");
124: GC_printf1("%lu msecs", (unsigned long)time_diff);
125: GC_printf1("(attempt %d)\n", (unsigned long) GC_n_attempts);
126: # endif
127: return(1);
128: }
129: return(0);
130: }
131: #endif /* !SMALL_CONFIG */
132:
/* Return the minimum number of words that must be allocated between	*/
/* collections to amortize the collection cost.				*/
static word min_words_allocd()
{
#   ifdef THREADS
 	/* We punt, for now. */
 	register signed_word stack_size = 10000;
#   else
        int dummy;
        register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
        /* Distance from an address in the current frame to the	*/
        /* recorded stack bottom; may be negative depending on stack	*/
        /* growth direction, normalized below.				*/
#   endif
    word total_root_size;	    /* includes double stack size,	*/
    				    /* since the stack is expensive	*/
    				    /* to scan.				*/
    word scan_size;		/* Estimate of memory to be scanned 	*/
				/* during normal GC.			*/
    
    if (stack_size < 0) stack_size = -stack_size;
    total_root_size = 2 * stack_size + GC_root_size;
    /* Scan cost ~ live heap + a fraction of large free space + roots.	*/
    scan_size = BYTES_TO_WORDS(GC_heapsize - GC_large_free_bytes
			       + (GC_large_free_bytes >> 2)
				   /* use a bit more of large empty heap */
			       + total_root_size);
    if (GC_incremental) {
        /* Collect twice as eagerly: incremental work is spread over	*/
        /* mutator time, so each cycle should be cheaper.		*/
        return (scan_size*GC_free_space_numerator) / (2 * GC_free_space_divisor);
    } else {
        return (scan_size*GC_free_space_numerator) / GC_free_space_divisor;
    }
}
162:
/* Return the number of words allocated, adjusted for explicit storage	*/
/* management, etc..  This number is used in deciding when to trigger	*/
/* collections.								*/
word GC_adj_words_allocd()
{
    register signed_word result;
    register signed_word expl_managed =
    		BYTES_TO_WORDS((long)GC_non_gc_bytes
    				- (long)GC_non_gc_bytes_at_gc);
    		/* Words explicitly allocated as uncollectable since	*/
    		/* the last GC (may be negative if such memory was	*/
    		/* released).						*/
    
    /* Don't count what was explicitly freed, or newly allocated for	*/
    /* explicit management.  Note that deallocating an explicitly	*/
    /* managed object should not alter result, assuming the client	*/
    /* is playing by the rules.						*/
    result = (signed_word)GC_words_allocd
    	     - (signed_word)GC_mem_freed - expl_managed;
    if (result > (signed_word)GC_words_allocd) {
        /* The subtraction went the "wrong" way; clamp to the raw	*/
        /* allocation count.						*/
        result = GC_words_allocd;
    	/* probably client bug or unfortunate scheduling */
    }
    result += GC_words_finalized;
    	/* We count objects enqueued for finalization as though they	*/
    	/* had been reallocated this round. Finalization is user	*/
    	/* visible progress.  And if we don't count this, we have	*/
    	/* stability problems for programs that finalize all objects.	*/
    result += GC_words_wasted;
     	/* This doesn't reflect useful work.  But if there is lots of	*/
     	/* new fragmentation, the same is probably true of the heap,	*/
     	/* and the collection will be correspondingly cheaper.		*/
    if (result < (signed_word)(GC_words_allocd >> 3)) {
    	/* Always count at least 1/8 of the allocations.  We don't want	*/
    	/* to collect too infrequently, since that would inhibit	*/
    	/* coalescing of free storage blocks.				*/
    	/* This also makes us partially robust against client bugs.	*/
        return(GC_words_allocd >> 3);
    } else {
        return(result);
    }
}
202:
203:
204: /* Clear up a few frames worth of garbage left at the top of the stack. */
205: /* This is used to prevent us from accidentally treating garbade left */
206: /* on the stack by other parts of the collector as roots. This */
207: /* differs from the code in misc.c, which actually tries to keep the */
208: /* stack clear of long-lived, client-generated garbage. */
209: void GC_clear_a_few_frames()
210: {
211: # define NWORDS 64
212: word frames[NWORDS];
213: register int i;
214:
215: for (i = 0; i < NWORDS; i++) frames[i] = 0;
216: }
217:
218: /* Have we allocated enough to amortize a collection? */
219: GC_bool GC_should_collect()
220: {
221: return(GC_adj_words_allocd() >= min_words_allocd());
222: }
223:
224:
225: void GC_notify_full_gc()
226: {
227: if (GC_start_call_back != (void (*)())0) {
228: (*GC_start_call_back)();
229: }
230: }
231:
232: GC_bool GC_is_full_gc = FALSE;
233:
/*
 * Initiate a garbage collection if appropriate.
 * Choose judiciously
 * between partial, full, and stop-world collections.
 * Assumes lock held, signals disabled.
 */
void GC_maybe_gc()
{
    static int n_partial_gcs = 0;	/* Partial GCs since last full one. */

    if (GC_should_collect()) {
        if (!GC_incremental) {
            /* Non-incremental mode: just do a full stop-world GC.	*/
            GC_notify_full_gc();
            GC_gcollect_inner();
            n_partial_gcs = 0;
            return;
        } else if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
            /* Time for a full collection: discard partial-GC state	*/
            /* (black lists, mark bits) and restart marking from	*/
            /* scratch.							*/
#   	    ifdef PRINTSTATS
	      GC_printf2(
	        "***>Full mark for collection %lu after %ld allocd bytes\n",
	     	(unsigned long) GC_gc_no+1,
	   	(long)WORDS_TO_BYTES(GC_words_allocd));
#           endif
	    GC_promote_black_lists();
	    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
	    GC_clear_marks();
            n_partial_gcs = 0;
	    GC_notify_full_gc();
 	    GC_is_full_gc = TRUE;
        } else {
            n_partial_gcs++;
        }
	/* We try to mark with the world stopped.	*/
	/* If we run out of time, this turns into	*/
	/* incremental marking.				*/
	GET_TIME(GC_start_time);
	if (GC_stopped_mark(GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
	        GC_save_callers(GC_last_stack);
#           endif
	    GC_finish_collection();
	} else {
	    if (!GC_is_full_gc) {
		/* Count this as the first attempt */
	        GC_n_attempts++;
	    }
	}
    }
}
283:
284:
/*
 * Stop the world garbage collection.  Assumes lock held, signals disabled.
 * If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
 * Returns TRUE iff the collection ran to completion.
 */
GC_bool GC_try_to_collect_inner(stop_func)
GC_stop_func stop_func;
{
    if (GC_incremental && GC_collection_in_progress()) {
#   ifdef PRINTSTATS
	GC_printf0(
	    "GC_try_to_collect_inner: finishing collection in progress\n");
#    endif /* PRINTSTATS */
      /* Just finish collection already in progress.	*/
    	while(GC_collection_in_progress()) {
    	    if (stop_func()) return(FALSE);
    	    GC_collect_a_little_inner(1);
    	}
    }
#   ifdef PRINTSTATS
	GC_printf2(
	  "Initiating full world-stop collection %lu after %ld allocd bytes\n",
	  (unsigned long) GC_gc_no+1,
	  (long)WORDS_TO_BYTES(GC_words_allocd));
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines	*/
    /* don't see cleared mark bits.					*/
    /* If we're guaranteed to finish, then this is unnecessary.		*/
    /* In the find_leak case, we have to finish to guarantee that 	*/
    /* previously unmarked objects are not reported as leaks.		*/
	if ((GC_find_leak || stop_func != GC_never_stop_func)
	    && !GC_reclaim_all(stop_func, FALSE)) {
	    /* Aborted.  So far everything is still consistent.	*/
	    return(FALSE);
	}
    GC_invalidate_mark_state();  /* Flush mark stack.	*/
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
        GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
    	/* We're partially done and have no way to complete or use 	*/
    	/* current work.  Reestablish invariants as cheaply as		*/
    	/* possible.							*/
    	GC_invalidate_mark_state();
	GC_unpromote_black_lists();
      } /* else we claim the world is already still consistent.  We'll 	*/
        /* finish incrementally.					*/
      return(FALSE);
    }
    GC_finish_collection();
    return(TRUE);
}
340:
341:
342:
343: /*
344: * Perform n units of garbage collection work. A unit is intended to touch
345: * roughly GC_RATE pages. Every once in a while, we do more than that.
346: * This needs to be a fairly large number with our current incremental
347: * GC strategy, since otherwise we allocate too much during GC, and the
348: * cleanup gets expensive.
349: */
350: # define GC_RATE 10
351: # define MAX_PRIOR_ATTEMPTS 1
352: /* Maximum number of prior attempts at world stop marking */
353: /* A value of 1 means that we finish the second time, no matter */
354: /* how long it takes. Doesn't count the initial root scan */
355: /* for a full GC. */
356:
357: int GC_deficit = 0; /* The number of extra calls to GC_mark_some */
358: /* that we have made. */
359:
/* Perform n units of incremental collection work if a collection is	*/
/* in progress; otherwise consider starting one (via GC_maybe_gc).	*/
/* Each unit is roughly GC_RATE calls to GC_mark_some, reduced by any	*/
/* GC_deficit accumulated from an abandoned stopped-marking attempt.	*/
/* Assumes lock held, signals disabled.					*/
void GC_collect_a_little_inner(n)
int n;
{
    register int i;
    
    if (GC_incremental && GC_collection_in_progress()) {
	for (i = GC_deficit; i < GC_RATE*n; i++) {
	    if (GC_mark_some((ptr_t)0)) {
	        /* Need to finish a collection */
#     		ifdef SAVE_CALL_CHAIN
		    GC_save_callers(GC_last_stack);
#     		endif
		if (GC_n_attempts < MAX_PRIOR_ATTEMPTS) {
		  /* Try to finish under a time limit; on timeout,	*/
		  /* record the attempt and keep going incrementally.	*/
		  GET_TIME(GC_start_time);
		  if (!GC_stopped_mark(GC_timeout_stop_func)) {
		    GC_n_attempts++;
		    break;
		  }
		} else {
		  /* Too many prior attempts: finish unconditionally.	*/
		  (void)GC_stopped_mark(GC_never_stop_func);
		}
		GC_finish_collection();
		break;
	    }
	}
	/* Pay down (or reset) the deficit from abandoned marking.	*/
	if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
	if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
}
391:
/* Public entry point: do one unit of GC work under the allocator lock	*/
/* with signals disabled.  Returns nonzero iff a collection is still	*/
/* in progress afterwards.						*/
int GC_collect_a_little GC_PROTO(())
{
    int result;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    ENABLE_SIGNALS();
    return(result);
}
405:
406: /*
407: * Assumes lock is held, signals are disabled.
408: * We stop the world.
409: * If stop_func() ever returns TRUE, we may fail and return FALSE.
410: * Increment GC_gc_no if we succeed.
411: */
412: GC_bool GC_stopped_mark(stop_func)
413: GC_stop_func stop_func;
414: {
415: register int i;
416: int dummy;
417: # ifdef PRINTSTATS
418: CLOCK_TYPE start_time, current_time;
419: # endif
420:
421: STOP_WORLD();
422: GC_timerstart();
423: # ifdef PRINTSTATS
424: GET_TIME(start_time);
425: GC_printf1("--> Marking for collection %lu ",
426: (unsigned long) GC_gc_no + 1);
427: GC_printf2("after %lu allocd bytes + %lu wasted bytes\n",
428: (unsigned long) WORDS_TO_BYTES(GC_words_allocd),
429: (unsigned long) WORDS_TO_BYTES(GC_words_wasted));
430: # endif
431:
432: /* Mark from all roots. */
433: /* Minimize junk left in my registers and on the stack */
434: GC_clear_a_few_frames();
435: GC_noop(0,0,0,0,0,0);
436: GC_initiate_gc();
437: for(i = 0;;i++) {
438: if ((*stop_func)()) {
439: # ifdef PRINTSTATS
440: GC_printf0("Abandoned stopped marking after ");
441: GC_printf1("%lu iterations\n",
442: (unsigned long)i);
443: # endif
444: GC_timerstop();
445: GC_deficit = i; /* Give the mutator a chance. */
446: START_WORLD();
447: return(FALSE);
448: }
449: if (GC_mark_some((ptr_t)(&dummy))) break;
450: }
451:
452: GC_gc_no++;
453: # ifdef PRINTSTATS
454: GC_printf2("Collection %lu reclaimed %ld bytes",
455: (unsigned long) GC_gc_no - 1,
456: (long)WORDS_TO_BYTES(GC_mem_found));
457: GC_printf1(" ---> heapsize = %lu bytes\n",
458: (unsigned long) GC_heapsize);
459: /* Printf arguments may be pushed in funny places. Clear the */
460: /* space. */
461: GC_printf0("");
462: # endif
463:
464: /* Check all debugged objects for consistency */
465: if (GC_debugging_started) {
466: (*GC_check_heap)();
467: }
468:
469: # ifdef PRINTTIMES
470: GET_TIME(current_time);
471: GC_printf1("World-stopped marking took %lu msecs\n",
472: MS_TIME_DIFF(current_time,start_time));
473: # endif
474: GC_timerstop();
475: START_WORLD();
476: return(TRUE);
477: }
478:
479:
/* Finish up a collection.  Assumes lock is held, signals are disabled,	*/
/* but the world is otherwise running.  Sweeps (or, in find-leak mode,	*/
/* checks), runs finalization, resets per-cycle counters, and decides	*/
/* whether the next collection must be a full one.			*/
void GC_finish_collection()
{
#   ifdef PRINTTIMES
	CLOCK_TYPE start_time;
	CLOCK_TYPE finalize_time;
	CLOCK_TYPE done_time;
	
	GET_TIME(start_time);
	finalize_time = start_time;
#   endif
    GC_timerstart();

#   ifdef GATHERSTATS
        GC_mem_found = 0;
#   endif
    if (GC_find_leak) {
      /* Mark all objects on the free list.  All objects should be */
      /* marked when we're done.				   */
	{
	  register word size;		/* current object size		*/
	  register ptr_t p;	/* pointer to current object	*/
	  register struct hblk * h;	/* pointer to block containing *p */
	  register hdr * hhdr;
	  register int word_no;           /* "index" of *p in *q          */
	  int kind;

	  for (kind = 0; kind < GC_n_kinds; kind++) {
	    for (size = 1; size <= MAXOBJSZ; size++) {
	      for (p= GC_obj_kinds[kind].ok_freelist[size];
	           p != 0; p=obj_link(p)){
		h = HBLKPTR(p);
		hhdr = HDR(h);
		word_no = (((word *)p) - ((word *)h));
		set_mark_bit_from_hdr(hhdr, word_no);
	      }
	    }
	  }
	}
	GC_start_reclaim(TRUE);
	  /* The above just checks; it doesn't really reclaim anything. */
    }

    GC_finalize();
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

#   ifdef PRINTTIMES
      GET_TIME(finalize_time);
#   endif

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* Note: HBLKPTR(p) == pointer to head of block containing *p        */
    /* (or GC_find_leak is set and they were intentionally marked.)	 */
    /* Also subtract memory remaining from GC_mem_found count.           */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem;  only     */
    /* objects on the list itself will be marked, and that's fixed here. */
      {
	register word size;		/* current object size		*/
	register ptr_t p;	/* pointer to current object	*/
	register struct hblk * h;	/* pointer to block containing *p */
	register hdr * hhdr;
	register int word_no;           /* "index" of *p in *q          */
	int kind;

	for (kind = 0; kind < GC_n_kinds; kind++) {
	  for (size = 1; size <= MAXOBJSZ; size++) {
	    for (p= GC_obj_kinds[kind].ok_freelist[size];
	         p != 0; p=obj_link(p)){
		h = HBLKPTR(p);
		hhdr = HDR(h);
		word_no = (((word *)p) - ((word *)h));
		clear_mark_bit_from_hdr(hhdr, word_no);
#		ifdef GATHERSTATS
		    GC_mem_found -= size;
#		endif
	    }
	  }
	}
      }


#   ifdef PRINTSTATS
	GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
	          (long)WORDS_TO_BYTES(GC_mem_found));
#   endif
    /* Reconstruct free lists to contain everything not marked */
        GC_start_reclaim(FALSE);
        if (GC_is_full_gc)  {
	    GC_used_heap_size_after_full = USED_HEAP_SIZE;
	    GC_need_full_gc = FALSE;
	} else {
	    /* Force a full GC next time if the heap has grown by more	*/
	    /* than an amortization threshold since the last full GC.	*/
	    GC_need_full_gc =
	         BYTES_TO_WORDS(USED_HEAP_SIZE - GC_used_heap_size_after_full)
	         > min_words_allocd();
	}

#   ifdef PRINTSTATS
	GC_printf2(
		  "Immediately reclaimed %ld bytes in heap of size %lu bytes",
	          (long)WORDS_TO_BYTES(GC_mem_found),
	          (unsigned long)GC_heapsize);
#	ifdef USE_MUNMAP
	  GC_printf1("(%lu unmapped)", GC_unmapped_bytes);
#	endif
	GC_printf2(
		"\n%lu (atomic) + %lu (composite) collectable bytes in use\n",
	        (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
	        (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
#   endif

      GC_n_attempts = 0;
      GC_is_full_gc = FALSE;
    /* Reset or increment counters for next cycle */
      GC_words_allocd_before_gc += GC_words_allocd;
      GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
      GC_words_allocd = 0;
      GC_words_wasted = 0;
      GC_mem_freed = 0;
      
#   ifdef USE_MUNMAP
      GC_unmap_old();
#   endif
#   ifdef PRINTTIMES
      GET_TIME(done_time);
      GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
                 MS_TIME_DIFF(finalize_time,start_time),
                 MS_TIME_DIFF(done_time,finalize_time));
#   endif
    GC_timerstop();
}
614:
/* Externally callable routine to invoke full, stop-world collection.	*/
/* Runs pending finalizers first, initializes the collector on first	*/
/* use, and re-runs finalizers on success.  Returns nonzero iff the	*/
/* collection completed (really a bool, but it is externally visible,	*/
/* so that's clumsy).							*/
# if defined(__STDC__) || defined(__cplusplus)
    int GC_try_to_collect(GC_stop_func stop_func)
# else
    int GC_try_to_collect(stop_func)
    GC_stop_func stop_func;
# endif
{
    int result;
    DCL_LOCK_STATE;
    
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    ENTER_GC();
    if (!GC_is_initialized) GC_init_inner();
    /* Minimize junk left in my registers */
      GC_noop(0,0,0,0,0,0);
    result = (int)GC_try_to_collect_inner(stop_func);
    EXIT_GC();
    UNLOCK();
    ENABLE_SIGNALS();
    if(result) GC_INVOKE_FINALIZERS();
    return(result);
}
640:
/* Unconditionally perform a full, stop-world collection, notifying	*/
/* the client's full-GC callback first.  Failure is ignored.		*/
void GC_gcollect GC_PROTO(())
{
    GC_notify_full_gc();
    (void)GC_try_to_collect(GC_never_stop_func);
}
646:
647: word GC_n_heap_sects = 0; /* Number of sections currently in heap. */
648:
/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
void GC_add_to_heap(p, bytes)
struct hblk *p;
word bytes;
{
    word words;
    hdr * phdr;
    
    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
    	ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
    	/* This is extremely unlikely. Can't add it.  This will		*/
    	/* almost certainly result in a 0 return from the allocator,	*/
    	/* which is entirely appropriate.				*/
    	return;
    }
    /* Record the new section, then hand the whole chunk to the block	*/
    /* allocator as a single free block.				*/
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    words = BYTES_TO_WORDS(bytes - HDR_BYTES);
    phdr -> hb_sz = words;
    phdr -> hb_map = (char *)1;   /* A value != GC_invalid_map	*/
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;
    /* Widen the plausible-heap-address bounds used by pointer		*/
    /* validity tests to cover the new section.				*/
    if ((ptr_t)p <= GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (ptr_t)p - sizeof(word);
        	/* Making it a little smaller than necessary prevents	*/
        	/* us from getting a false hit from the variable	*/
        	/* itself.  There's some unintentional reflection	*/
        	/* here.						*/
    }
    if ((ptr_t)p + bytes >= GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (ptr_t)p + bytes;
    }
}
691:
692: # if !defined(NO_DEBUGGING)
693: void GC_print_heap_sects()
694: {
695: register unsigned i;
696:
697: GC_printf1("Total heap size: %lu\n", (unsigned long) GC_heapsize);
698: for (i = 0; i < GC_n_heap_sects; i++) {
699: unsigned long start = (unsigned long) GC_heap_sects[i].hs_start;
700: unsigned long len = (unsigned long) GC_heap_sects[i].hs_bytes;
701: struct hblk *h;
702: unsigned nbl = 0;
703:
704: GC_printf3("Section %ld from 0x%lx to 0x%lx ", (unsigned long)i,
705: start, (unsigned long)(start + len));
706: for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
707: if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
708: }
709: GC_printf2("%lu/%lu blacklisted\n", (unsigned long)nbl,
710: (unsigned long)(len/HBLKSIZE));
711: }
712: }
713: # endif
714:
715: ptr_t GC_least_plausible_heap_addr = (ptr_t)ONES;
716: ptr_t GC_greatest_plausible_heap_addr = 0;
717:
718: ptr_t GC_max(x,y)
719: ptr_t x, y;
720: {
721: return(x > y? x : y);
722: }
723:
724: ptr_t GC_min(x,y)
725: ptr_t x, y;
726: {
727: return(x < y? x : y);
728: }
729:
/* Set a self-imposed upper bound on the heap size, in bytes.  A value	*/
/* of 0 (the default) means no limit.  Enforced by GC_expand_hp_inner.	*/
# if defined(__STDC__) || defined(__cplusplus)
  void GC_set_max_heap_size(GC_word n)
# else
  void GC_set_max_heap_size(n)
  GC_word n;
# endif
{
    GC_max_heapsize = n;
}
739:
740: GC_word GC_max_retries = 0;
741:
/*
 * this explicitly increases the size of the heap.  It is used
 * internally, but may also be invoked from GC_expand_hp by the user.
 * The argument is in units of HBLKSIZE.
 * Tiny values of n are rounded up.
 * Returns FALSE on failure.
 */
GC_bool GC_expand_hp_inner(n)
word n;
{
    word bytes;
    struct hblk * space;
    word expansion_slop;	/* Number of bytes by which we expect the */
    				/* heap to expand soon.			  */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size */
      {
	word mask = GC_page_size - 1;
	bytes += mask;
	bytes &= ~mask;
      }
    
    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    if( space == 0 ) {
	return(FALSE);
    }
#   ifdef PRINTSTATS
	GC_printf2("Increasing heap size by %lu after %lu allocated bytes\n",
	           (unsigned long)bytes,
	           (unsigned long)WORDS_TO_BYTES(GC_words_allocd));
# 	ifdef UNDEFINED
	  GC_printf1("Root size = %lu\n", GC_root_size);
	  GC_print_block_list(); GC_print_hblkfreelist();
	  GC_printf0("\n");
#	endif
#   endif
    /* Pad the plausible-address bounds with expected near-future	*/
    /* growth, so they need not move on every expansion.		*/
    expansion_slop = 8 * WORDS_TO_BYTES(min_words_allocd());
    if (5 * HBLKSIZE * MAXHINCR > expansion_slop) {
        expansion_slop = 5 * HBLKSIZE * MAXHINCR;
    }
    /* Guess the growth direction from the previous expansion address	*/
    /* (or, the first time, from the sign bit of the new address).	*/
    if (GC_last_heap_addr == 0 && !((word)space & SIGNB)
        || GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space) {
        /* Assume the heap is growing up */
	GC_greatest_plausible_heap_addr =
	    GC_max(GC_greatest_plausible_heap_addr,
	           (ptr_t)space + bytes + expansion_slop);
    } else {
        /* Heap is growing down */
	GC_least_plausible_heap_addr =
	    GC_min(GC_least_plausible_heap_addr,
	           (ptr_t)space - expansion_slop);
    }
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;
    GC_add_to_heap(space, bytes);
#if defined(VISUAL_LIB)
    SendHeapSize();
#endif
    return(TRUE);
}
808:
/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Arguments is in bytes.  Takes the lock, initializes the collector	 */
/* on first use, and tracks the cumulative user-requested heap size.	 */
# if defined(__STDC__) || defined(__cplusplus)
  int GC_expand_hp(size_t bytes)
# else
  int GC_expand_hp(bytes)
  size_t bytes;
# endif
{
    int result;
    DCL_LOCK_STATE;
    
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    ENABLE_SIGNALS();
    return(result);
}
830:
831: unsigned GC_fail_count = 0;
832: /* How many consecutive GC/expansion failures? */
833: /* Reset by GC_allochblk. */
834:
/* Make room for needed_blocks more heap blocks, either by collecting	*/
/* (when that looks profitable) or by expanding the heap.  Returns	*/
/* FALSE only when both collection and expansion fail more than		*/
/* GC_max_retries times in a row.  Assumes lock held.			*/
GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
GC_bool ignore_off_page;
{
    if (!GC_incremental && !GC_dont_gc &&
	(GC_dont_expand && GC_words_allocd > 0 || GC_should_collect())) {
      GC_notify_full_gc();
      GC_gcollect_inner();
    } else {
      /* Grow the heap in proportion to its current size, plus what	*/
      /* the caller needs right now.					*/
      word blocks_to_get = (GC_heapsize*GC_free_space_numerator)/(HBLKSIZE*GC_free_space_divisor)
      			   + needed_blocks;
      
      if (blocks_to_get > MAXHINCR) {
          word slop;
          
          /* Cap the increment at MAXHINCR, but keep enough slop to	*/
          /* be able to avoid black-listed regions.			*/
          if (ignore_off_page) {
              slop = 4;
          } else {
              slop = 2*divHBLKSZ(BL_LIMIT);
              if (slop > needed_blocks) slop = needed_blocks;
          }
          if (needed_blocks + slop > MAXHINCR) {
              blocks_to_get = needed_blocks + slop;
          } else {
              blocks_to_get = MAXHINCR;
          }
      }
      if (!GC_expand_hp_inner(blocks_to_get)
        && !GC_expand_hp_inner(needed_blocks)) {
      	if (GC_fail_count++ < GC_max_retries) {
      	    WARN("Out of Memory!  Trying to continue ...\n", 0);
	    GC_notify_full_gc();
	    GC_gcollect_inner();
	} else {
	    WARN("Out of Memory!  Returning NIL!\n", 0);
	    return(FALSE);
	}
      } else {
#	  ifdef PRINTSTATS
            if (GC_fail_count) {
	      GC_printf0("Memory available again ...\n");
	    }
#	  endif
      }
    }
    return(TRUE);
}
882:
/*
 * Make sure the object free list for sz is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock and signals are disabled.
 *
 */
ptr_t GC_allocobj(sz, kind)
word sz;
int kind;
{
    register ptr_t * flh = &(GC_obj_kinds[kind].ok_freelist[sz]);
    
    if (sz == 0) return(0);

#if defined(VISUAL)
    /* Asir-specific hook: service a pending user interrupt before	*/
    /* allocating.  recv_intr == 1 selects the keyboard interrupt	*/
    /* handler, any other nonzero value the OX USR1 handler.		*/
    {
#include <signal.h>
	extern int recv_intr;
	if ( recv_intr ) {
	    if ( recv_intr == 1 ) {
		recv_intr = 0;
		int_handler();
	    } else {
		recv_intr = 0;
		ox_usr1_handler(0);
	    }
	}
    }
#endif
    /* Retry until the free list is non-empty: (1) do a share of	*/
    /* incremental marking, (2) sweep blocks of this size, (3) get a	*/
    /* fresh block, (4) collect or expand the heap -- giving up only	*/
    /* if the last step fails.						*/
    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work */
        if(GC_incremental && !GC_dont_gc) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size */
        GC_continue_reclaim(sz, kind);
      EXIT_GC();
      if (*flh == 0) {
        GC_new_hblk(sz, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        if (!GC_collect_or_expand((word)1,FALSE)) {
            EXIT_GC();
            return(0);
	}
	EXIT_GC();
      }
    }
    
    return(*flh);
}
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>