/* OpenXM_contrib/gc/finalize.c, Revision 1.1.1.3 */
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 1, 1996 1:19 pm PST */
# define I_HIDE_POINTERS
# include "gc_priv.h"
# include "gc_mark.h"

# ifdef FINALIZE_ON_DEMAND
  int GC_finalize_on_demand = 1;
# else
  int GC_finalize_on_demand = 0;
# endif

# ifdef JAVA_FINALIZATION
  int GC_java_finalization = 1;
# else
  int GC_java_finalization = 0;
# endif

/* Type of mark procedure used for marking from finalizable object. */
/* This procedure normally does not mark the object, only its       */
/* descendants.                                                      */
typedef void finalization_mark_proc(/* ptr_t finalizable_obj_ptr */);

# define HASH3(addr,size,log_size) \
    ((((word)(addr) >> 3) ^ ((word)(addr) >> (3+(log_size)))) \
     & ((size) - 1))
# define HASH2(addr,log_size) HASH3(addr, 1 << log_size, log_size)

struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};

unsigned GC_finalization_failures = 0;
        /* Number of finalization requests that failed for lack of memory. */

static struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                /* Field to be cleared. */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)

    word dl_hidden_obj;         /* Pointer to object base */
} **dl_head = 0;

static signed_word log_dl_table_size = -1;
                        /* Binary log of                                */
                        /* current size of array pointed to by dl_head. */
                        /* -1 ==> size is 0.                            */

word GC_dl_entries = 0; /* Number of entries currently in disappearing */
                        /* link table.                                  */

static struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                        /* Pointer to object base.      */
                        /* No longer hidden once object */
                        /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
    GC_finalization_proc fo_fn;         /* Finalizer. */
    ptr_t fo_client_data;
    word fo_object_size;                /* In bytes. */
    finalization_mark_proc * fo_mark_proc;      /* Mark-through procedure */
} **fo_head = 0;

struct finalizable_object * GC_finalize_now = 0;
        /* List of objects that should be finalized now. */

static signed_word log_fo_table_size = -1;

word GC_fo_entries = 0;

# ifdef SRC_M3
void GC_push_finalizer_structures()
{
    GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
    GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
}
# endif

/* Double the size of a hash table. *log_size_ptr is the log of its    */
/* current size. May be a no-op.                                       */
/* *table is a pointer to an array of hash headers. If we succeed, we  */
/* update both *table and *log_size_ptr.                                */
/* Lock is held. Signals are disabled.                                  */
void GC_grow_table(table, log_size_ptr)
struct hash_chain_entry ***table;
signed_word * log_size_ptr;
{
    register word i;
    register struct hash_chain_entry *p;
    int log_old_size = *log_size_ptr;
    register int log_new_size = log_old_size + 1;
    word old_size = ((log_old_size == -1) ? 0 : (1 << log_old_size));
    register word new_size = 1 << log_new_size;
    struct hash_chain_entry **new_table = (struct hash_chain_entry **)
        GC_generic_malloc_inner_ignore_off_page(
            (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);

    if (new_table == 0) {
        if (*table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            return;
        }
    }
    for (i = 0; i < old_size; i++) {
        p = (*table)[i];
        while (p != 0) {
            register ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
            register struct hash_chain_entry *next = p -> next;
            register int new_hash = HASH3(real_key, new_size, log_new_size);

            p -> next = new_table[new_hash];
            new_table[new_hash] = p;
            p = next;
        }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}

# if defined(__STDC__) || defined(__cplusplus)
    int GC_register_disappearing_link(GC_PTR * link)
# else
    int GC_register_disappearing_link(link)
    GC_PTR * link;
# endif
{
    ptr_t base;

    base = (ptr_t)GC_base((GC_PTR)link);
    if (base == 0)
        ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));
}

# if defined(__STDC__) || defined(__cplusplus)
    int GC_general_register_disappearing_link(GC_PTR * link,
                                               GC_PTR obj)
# else
    int GC_general_register_disappearing_link(link, obj)
    GC_PTR * link;
    GC_PTR obj;
# endif

{
    struct disappearing_link *curr_dl;
    int index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    if ((word)link & (ALIGNMENT-1))
        ABORT("Bad arg to GC_general_register_disappearing_link");
#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#   endif
    if (log_dl_table_size == -1
        || GC_dl_entries > ((word)1 << log_dl_table_size)) {
#       ifndef THREADS
            DISABLE_SIGNALS();
#       endif
        GC_grow_table((struct hash_chain_entry ***)(&dl_head),
                      &log_dl_table_size);
#       ifdef PRINTSTATS
            GC_printf1("Grew dl table to %lu entries\n",
                       (unsigned long)(1 << log_dl_table_size));
#       endif
#       ifndef THREADS
            ENABLE_SIGNALS();
#       endif
    }
    index = HASH2(link, log_dl_table_size);
    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
#           ifdef THREADS
                UNLOCK();
                ENABLE_SIGNALS();
#           endif
            return(1);
        }
    }
#   ifdef THREADS
      new_dl = (struct disappearing_link *)
        GC_generic_malloc_inner(sizeof(struct disappearing_link), NORMAL);
#   else
      new_dl = (struct disappearing_link *)
        GC_malloc(sizeof(struct disappearing_link));
#   endif
    if (new_dl != 0) {
        new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
        new_dl -> dl_hidden_link = HIDE_POINTER(link);
        dl_set_next(new_dl, dl_head[index]);
        dl_head[index] = new_dl;
        GC_dl_entries++;
    } else {
        GC_finalization_failures++;
    }
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
    return(0);
}
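
/* A hedged usage sketch, not part of the collector: how a client might     */
/* build a weak reference on top of this interface. It assumes gc.h exposes */
/* HIDE_POINTER()/REVEAL_POINTER() when I_HIDE_POINTERS is defined (as this */
/* file itself relies on); the weak_ref type and helpers are hypothetical.  */
#if 0
# define I_HIDE_POINTERS
# include "gc.h"

struct weak_ref { GC_word hidden; };

struct weak_ref * make_weak_ref(GC_PTR target)
{
    /* The holder lives in the GC heap, so the registration is dropped   */
    /* automatically if the holder itself becomes unreachable.           */
    struct weak_ref * r = (struct weak_ref *)GC_malloc(sizeof(struct weak_ref));

    /* Store the target in hidden form so this reference alone does not  */
    /* keep it alive; the collector clears r->hidden once target dies.   */
    r -> hidden = HIDE_POINTER(target);
    GC_general_register_disappearing_link((GC_PTR *)(&r -> hidden), target);
    return r;
}

GC_PTR weak_ref_get(struct weak_ref * r)
{
    /* Yields 0 once the target has been collected. In a THREADS build   */
    /* this read should be done under GC_call_with_alloc_lock.           */
    return r -> hidden ? REVEAL_POINTER(r -> hidden) : 0;
}
#endif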

# if defined(__STDC__) || defined(__cplusplus)
    int GC_unregister_disappearing_link(GC_PTR * link)
# else
    int GC_unregister_disappearing_link(link)
    GC_PTR * link;
# endif
{
    struct disappearing_link *curr_dl, *prev_dl;
    int index;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    if (((unsigned long)link & (ALIGNMENT-1))) goto out;
    if (dl_head == 0) goto out;   /* No disappearing links are registered. */
    index = HASH2(link, log_dl_table_size);
    prev_dl = 0; curr_dl = dl_head[index];
    while (curr_dl != 0) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            if (prev_dl == 0) {
                dl_head[index] = dl_next(curr_dl);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
            }
            GC_dl_entries--;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_free((GC_PTR)curr_dl);
            return(1);
        }
        prev_dl = curr_dl;
        curr_dl = dl_next(curr_dl);
    }
out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(0);
}

/* Possible finalization_marker procedures. Note that mark stack       */
/* overflow is handled by the caller, and is not a disaster.           */
GC_API void GC_normal_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);

    PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top,
             &(GC_mark_stack[GC_mark_stack_size]));
}

/* This only pays very partial attention to the mark descriptor.       */
/* It does the right thing for normal and atomic objects, and treats   */
/* most others as normal.                                               */
GC_API void GC_ignore_self_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q, r;
    ptr_t scan_limit;
    ptr_t target_limit = p + WORDS_TO_BYTES(hhdr -> hb_sz) - 1;

    if ((descr & DS_TAGS) == DS_LENGTH) {
        scan_limit = p + descr - sizeof(word);
    } else {
        scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; q <= scan_limit; q += ALIGNMENT) {
        r = *(ptr_t *)q;
        if (r < p || r > target_limit) {
            GC_PUSH_ONE_HEAP((word)r, q);
        }
    }
}

/*ARGSUSED*/
GC_API void GC_null_finalize_mark_proc(p)
ptr_t p;
{
}


/* Register a finalization function. See gc.h for details.             */
/* In the non-threads case, we try to avoid disabling signals,         */
/* since it can be expensive. Threads packages typically               */
/* make it cheaper.                                                     */
/* The last parameter is a procedure that determines                   */
/* marking for finalization ordering. Any objects marked               */
/* by that procedure will be guaranteed not to have been               */
/* finalized when this finalizer is invoked.                           */
315: GC_API void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
1.1 maekawa 316: GC_PTR obj;
317: GC_finalization_proc fn;
318: GC_PTR cd;
319: GC_finalization_proc * ofn;
320: GC_PTR * ocd;
321: finalization_mark_proc * mp;
322: {
323: ptr_t base;
324: struct finalizable_object * curr_fo, * prev_fo;
325: int index;
326: struct finalizable_object *new_fo;
327: DCL_LOCK_STATE;
328:
329: # ifdef THREADS
330: DISABLE_SIGNALS();
331: LOCK();
332: # endif
333: if (log_fo_table_size == -1
334: || GC_fo_entries > ((word)1 << log_fo_table_size)) {
335: # ifndef THREADS
336: DISABLE_SIGNALS();
337: # endif
338: GC_grow_table((struct hash_chain_entry ***)(&fo_head),
339: &log_fo_table_size);
340: # ifdef PRINTSTATS
341: GC_printf1("Grew fo table to %lu entries\n",
342: (unsigned long)(1 << log_fo_table_size));
343: # endif
344: # ifndef THREADS
345: ENABLE_SIGNALS();
346: # endif
347: }
348: /* in the THREADS case signals are disabled and we hold allocation */
349: /* lock; otherwise neither is true. Proceed carefully. */
350: base = (ptr_t)obj;
351: index = HASH2(base, log_fo_table_size);
352: prev_fo = 0; curr_fo = fo_head[index];
353: while (curr_fo != 0) {
354: if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
355: /* Interruption by a signal in the middle of this */
356: /* should be safe. The client may see only *ocd */
357: /* updated, but we'll declare that to be his */
358: /* problem. */
359: if (ocd) *ocd = (GC_PTR) curr_fo -> fo_client_data;
360: if (ofn) *ofn = curr_fo -> fo_fn;
361: /* Delete the structure for base. */
362: if (prev_fo == 0) {
363: fo_head[index] = fo_next(curr_fo);
364: } else {
365: fo_set_next(prev_fo, fo_next(curr_fo));
366: }
367: if (fn == 0) {
368: GC_fo_entries--;
369: /* May not happen if we get a signal. But a high */
370: /* estimate will only make the table larger than */
371: /* necessary. */
372: # ifndef THREADS
373: GC_free((GC_PTR)curr_fo);
374: # endif
375: } else {
376: curr_fo -> fo_fn = fn;
377: curr_fo -> fo_client_data = (ptr_t)cd;
378: curr_fo -> fo_mark_proc = mp;
379: /* Reinsert it. We deleted it first to maintain */
380: /* consistency in the event of a signal. */
381: if (prev_fo == 0) {
382: fo_head[index] = curr_fo;
383: } else {
384: fo_set_next(prev_fo, curr_fo);
385: }
386: }
387: # ifdef THREADS
388: UNLOCK();
389: ENABLE_SIGNALS();
390: # endif
391: return;
392: }
393: prev_fo = curr_fo;
394: curr_fo = fo_next(curr_fo);
395: }
396: if (ofn) *ofn = 0;
397: if (ocd) *ocd = 0;
398: if (fn == 0) {
399: # ifdef THREADS
400: UNLOCK();
401: ENABLE_SIGNALS();
402: # endif
403: return;
404: }
405: # ifdef THREADS
406: new_fo = (struct finalizable_object *)
407: GC_generic_malloc_inner(sizeof(struct finalizable_object),NORMAL);
408: # else
409: new_fo = (struct finalizable_object *)
410: GC_malloc(sizeof(struct finalizable_object));
411: # endif
412: if (new_fo != 0) {
413: new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
414: new_fo -> fo_fn = fn;
415: new_fo -> fo_client_data = (ptr_t)cd;
416: new_fo -> fo_object_size = GC_size(base);
417: new_fo -> fo_mark_proc = mp;
418: fo_set_next(new_fo, fo_head[index]);
419: GC_fo_entries++;
420: fo_head[index] = new_fo;
421: } else {
422: GC_finalization_failures++;
423: }
424: # ifdef THREADS
425: UNLOCK();
426: ENABLE_SIGNALS();
427: # endif
428: }

# if defined(__STDC__)
    void GC_register_finalizer(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}

# if defined(__STDC__)
    void GC_register_finalizer_ignore_self(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_ignore_self(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}

# if defined(__STDC__)
    void GC_register_finalizer_no_order(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_no_order(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}
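
/* A hedged usage sketch, not part of the collector: registering a          */
/* finalizer from client code. The buffer type, its finalizer, and          */
/* make_buffer() are hypothetical; GC_malloc, GC_register_finalizer and     */
/* GC_finalization_proc are the interface declared in gc.h.                 */
#if 0
# include <stdlib.h>
# include "gc.h"

struct buffer {
    void * external;            /* Resource not managed by the collector. */
};

/* Called at most once, after the buffer has become unreachable, either */
/* from GC_invoke_finalizers or from an allocation call, depending on   */
/* GC_finalize_on_demand.                                                */
static void buffer_finalizer(GC_PTR obj, GC_PTR client_data)
{
    free(((struct buffer *)obj) -> external);
}

static struct buffer * make_buffer(size_t n)
{
    struct buffer * b = (struct buffer *)GC_malloc(sizeof(struct buffer));

    b -> external = malloc(n);
    /* Pass 0 for the old-finalizer/old-data out parameters. Use         */
    /* GC_register_finalizer_no_order instead if finalization ordering   */
    /* (and hence cycle handling) does not matter for this object.       */
    GC_register_finalizer((GC_PTR)b, buffer_finalizer, 0, 0, 0);
    return b;
}
#endif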

/* Called with world stopped. Cause disappearing links to disappear,   */
/* and invoke finalizers.                                               */
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    register int i;
    int dl_size = (log_dl_table_size == -1) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1) ? 0 : (1 << log_fo_table_size);

    /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
        curr_dl = dl_head[i];
        prev_dl = 0;
        while (curr_dl != 0) {
            real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
            real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
            if (!GC_is_marked(real_ptr)) {
                *(word *)real_link = 0;
                next_dl = dl_next(curr_dl);
                if (prev_dl == 0) {
                    dl_head[i] = next_dl;
                } else {
                    dl_set_next(prev_dl, next_dl);
                }
                GC_clear_mark_bit((ptr_t)curr_dl);
                GC_dl_entries--;
                curr_dl = next_dl;
            } else {
                prev_dl = curr_dl;
                curr_dl = dl_next(curr_dl);
            }
        }
    }
    /* Mark all objects reachable via chains of 1 or more pointers     */
    /* from finalizable objects.                                        */
#   ifdef PRINTSTATS
        if (GC_mark_state != MS_NONE) ABORT("Bad mark state");
#   endif
    for (i = 0; i < fo_size; i++) {
        for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
            real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
            if (!GC_is_marked(real_ptr)) {
                GC_MARKED_FOR_FINALIZATION(real_ptr);
                GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
                if (GC_is_marked(real_ptr)) {
                    WARN("Finalization cycle involving %lx\n", real_ptr);
                }
            }
        }
    }
    /* Enqueue for finalization all objects that are still             */
    /* unreachable.                                                     */
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
        while (curr_fo != 0) {
            real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
            if (!GC_is_marked(real_ptr)) {
                if (!GC_java_finalization) {
                    GC_set_mark_bit(real_ptr);
                }
                /* Delete from hash table */
                next_fo = fo_next(curr_fo);
                if (prev_fo == 0) {
                    fo_head[i] = next_fo;
                } else {
                    fo_set_next(prev_fo, next_fo);
                }
                GC_fo_entries--;
                /* Add to list of objects awaiting finalization. */
                fo_set_next(curr_fo, GC_finalize_now);
                GC_finalize_now = curr_fo;
                /* unhide object pointer so any future collections will */
                /* see it.                                               */
                curr_fo -> fo_hidden_base =
                    (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
                GC_words_finalized +=
                    ALIGNED_WORDS(curr_fo -> fo_object_size)
                    + ALIGNED_WORDS(sizeof(struct finalizable_object));
#               ifdef PRINTSTATS
                    if (!GC_is_marked((ptr_t)curr_fo)) {
                        ABORT("GC_finalize: found accessible unmarked object\n");
                    }
#               endif
                curr_fo = next_fo;
            } else {
                prev_fo = curr_fo;
                curr_fo = fo_next(curr_fo);
            }
        }
    }

    if (GC_java_finalization) {
        /* make sure we mark everything reachable from objects finalized
           using the no_order mark_proc */
        for (curr_fo = GC_finalize_now;
             curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
                if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                    GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
                }
                GC_set_mark_bit(real_ptr);
            }
        }
    }

    /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
        curr_dl = dl_head[i];
        prev_dl = 0;
        while (curr_dl != 0) {
            real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
            if (real_link != 0 && !GC_is_marked(real_link)) {
                next_dl = dl_next(curr_dl);
                if (prev_dl == 0) {
                    dl_head[i] = next_dl;
                } else {
                    dl_set_next(prev_dl, next_dl);
                }
                GC_clear_mark_bit((ptr_t)curr_dl);
                GC_dl_entries--;
                curr_dl = next_dl;
            } else {
                prev_dl = curr_dl;
                curr_dl = dl_next(curr_dl);
            }
        }
    }
}

#ifndef JAVA_FINALIZATION_NOT_NEEDED

/* Enqueue all remaining finalizers to be run - Assumes lock is
 * held, and signals are disabled */
void GC_enqueue_all_finalizers()
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    register int i;
    int fo_size;

    fo_size = (log_fo_table_size == -1) ? 0 : (1 << log_fo_table_size);
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
        while (curr_fo != 0) {
            real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            GC_set_mark_bit(real_ptr);

            /* Delete from hash table */
            next_fo = fo_next(curr_fo);
            if (prev_fo == 0) {
                fo_head[i] = next_fo;
            } else {
                fo_set_next(prev_fo, next_fo);
            }
            GC_fo_entries--;

            /* Add to list of objects awaiting finalization. */
            fo_set_next(curr_fo, GC_finalize_now);
            GC_finalize_now = curr_fo;

            /* unhide object pointer so any future collections will */
            /* see it.                                               */
            curr_fo -> fo_hidden_base =
                (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);

            GC_words_finalized +=
                ALIGNED_WORDS(curr_fo -> fo_object_size)
                + ALIGNED_WORDS(sizeof(struct finalizable_object));
            curr_fo = next_fo;
        }
    }

    return;
}

/* Invoke all remaining finalizers that haven't yet been run.
 * This is needed for strict compliance with the Java standard,
 * which can make the runtime guarantee that all finalizers are run.
 * Unfortunately, the Java standard implies we have to keep running
 * finalizers until there are no more left, a potential infinite loop.
 * YUCK.
 * Note that this is even more dangerous than the usual Java
 * finalizers, in that objects reachable from static variables
 * may have been finalized when these finalizers are run.
 * Finalizers run at this point must be prepared to deal with a
 * mostly broken world.
 * This routine is externally callable, so is called without
 * the allocation lock.
 */
GC_API void GC_finalize_all()
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      ENABLE_SIGNALS();
      GC_INVOKE_FINALIZERS();
      DISABLE_SIGNALS();
      LOCK();
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
#endif
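
/* A hedged usage sketch, not part of the collector: Java-style shutdown,   */
/* where all pending finalizers are forced to run before the process exits. */
/* The extern declaration is given locally rather than assuming a           */
/* particular header exports GC_finalize_all; the hook name is hypothetical.*/
#if 0
# include <stdlib.h>
# include "gc.h"

extern void GC_finalize_all GC_PROTO((void));
        /* Only built when JAVA_FINALIZATION_NOT_NEEDED is undefined. */

static void run_all_finalizers_at_exit(void)
{
    /* May run finalizers on objects still reachable from static        */
    /* variables; see the warning above GC_finalize_all. Typically      */
    /* registered once via atexit(run_all_finalizers_at_exit).          */
    GC_finalize_all();
}
#endif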

/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of `safe state' and     */
/* getting into that safe state is expensive.)                           */
int GC_should_invoke_finalizers GC_PROTO((void))
{
    return GC_finalize_now != 0;
}

/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.                           */
int GC_invoke_finalizers()
{
    register struct finalizable_object * curr_fo;
    register int count = 0;
    DCL_LOCK_STATE;

    while (GC_finalize_now != 0) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        curr_fo = GC_finalize_now;
#       ifdef THREADS
            if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
            UNLOCK();
            ENABLE_SIGNALS();
            if (curr_fo == 0) break;
#       else
            GC_finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
#       ifdef UNDEFINED
            /* This is probably a bad idea. It throws off accounting if */
            /* nearly all objects are finalizable. Otherwise it         */
            /* shouldn't matter.                                        */
            GC_free((GC_PTR)curr_fo);
#       endif
    }
    return count;
}
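
/* A hedged usage sketch, not part of the collector: the polling pattern     */
/* the comment above GC_should_invoke_finalizers has in mind. With           */
/* GC_finalize_on_demand set, finalizers are only run when the client asks,  */
/* so a program polls at points where running arbitrary finalizers is safe.  */
/* maybe_run_finalizers() is a hypothetical name.                            */
#if 0
# include "gc.h"

extern int GC_should_invoke_finalizers GC_PROTO((void));
        /* Declared here in case this gc.h revision does not yet export it. */

void maybe_run_finalizers(void)
{
    /* Cheap test first: under THREADS, GC_invoke_finalizers takes and  */
    /* releases the allocation lock per object, so skip it entirely     */
    /* when the finalize_now queue is empty.                            */
    if (GC_should_invoke_finalizers()) {
        (void) GC_invoke_finalizers();
    }
}
#endif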

# ifdef __STDC__
    GC_PTR GC_call_with_alloc_lock(GC_fn_type fn,
                                   GC_PTR client_data)
# else
    GC_PTR GC_call_with_alloc_lock(fn, client_data)
    GC_fn_type fn;
    GC_PTR client_data;
# endif
{
    GC_PTR result;
    DCL_LOCK_STATE;

#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
        SET_LOCK_HOLDER();
#   endif
    result = (*fn)(client_data);
#   ifdef THREADS
        UNSET_LOCK_HOLDER();
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
    return(result);
}
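
/* A hedged usage sketch, not part of the collector: in a THREADS build,    */
/* client code that must not race with the collector (for example, code     */
/* reading a location that a disappearing link may clear) can run under the */
/* allocation lock via GC_call_with_alloc_lock. The callback and names      */
/* below are illustrative only.                                              */
#if 0
# include "gc.h"

static GC_PTR read_weak_location(GC_PTR p)
{
    /* Runs with the allocation lock held, so the load cannot race with */
    /* the collector clearing this location during a collection.        */
    return *(GC_PTR *)p;
}

GC_PTR safe_read_weak_location(GC_PTR * location)
{
    return GC_call_with_alloc_lock(read_weak_location, (GC_PTR)location);
}
#endif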