OpenXM_contrib/gc/malloc.c, Revision 1.1.1.1
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 7, 1996 4:32 pm PST */

#include <stdio.h>
#include "gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */

/* Allocate reclaim list for kind:      */
/* Return TRUE on success               */
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate lb bytes for an object of kind k.   */
/* Should not be used directly to allocate      */
/* objects such as STUBBORN objects that        */
/* require special handling on allocation.      */
/* First a version that assumes we already      */
/* hold the lock:                               */
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
register word lw;
register ptr_t op;
register ptr_t *opp;

    if( SMALL_OBJ(lb) ) {
        register struct obj_kind * kind = GC_obj_kinds + k;
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
          if (lw == 0) lw = 1;
#       endif
        opp = &(kind -> ok_freelist[lw]);
        if( (op = *opp) == 0 ) {
#           ifdef MERGE_SIZES
              if (GC_size_map[lb] == 0) {
                  if (!GC_is_initialized)  GC_init_inner();
                  if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                  return(GC_generic_malloc_inner(lb, k));
              }
#           else
              if (!GC_is_initialized) {
                  GC_init_inner();
                  return(GC_generic_malloc_inner(lb, k));
              }
#           endif
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lw, k);
            if (op == 0) goto out;
        }
        /* Here everything is in a consistent state.    */
        /* We assume the following assignment is        */
        /* atomic.  If we get aborted                   */
        /* after the assignment, we lose an object,     */
        /* but that's benign.                           */
        /* Volatile declarations may need to be added   */
        /* to prevent the compiler from breaking things.*/
        *opp = obj_link(op);
        obj_link(op) = 0;
    } else {
        register struct hblk * h;
        register word n_blocks = divHBLKSZ(ADD_SLOP(lb)
                                           + HDR_BYTES + HBLKSIZE-1);

        if (!GC_is_initialized) GC_init_inner();
        /* Do our share of marking work */
        if(GC_incremental && !GC_dont_gc)
            GC_collect_a_little_inner((int)n_blocks);
        lw = ROUNDED_UP_WORDS(lb);
        while ((h = GC_allochblk(lw, k, 0)) == 0
               && GC_collect_or_expand(n_blocks, FALSE));
        if (h == 0) {
            op = 0;
        } else {
            op = (ptr_t) (h -> hb_body);
            GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
        }
    }
    GC_words_allocd += lw;

out:
    return((ptr_t)op);
}

ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
    ptr_t result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = GC_generic_malloc_inner(lb, k);
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}
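
/* Illustrative sketch (not part of the original source): GC_oom_fn is     */
/* the client-replaceable out-of-memory hook invoked above when allocation */
/* fails even after collection.  A client that prefers to abort with a     */
/* diagnostic rather than receive 0 might install a handler like the       */
/* hypothetical my_oom_handler below, assuming the GC_oom_fn declaration   */
/* in gc.h.                                                                */
#if 0 /* usage example only */
    GC_PTR my_oom_handler(bytes)
    size_t bytes;
    {
        fprintf(stderr, "GC: out of memory allocating %lu bytes\n",
                (unsigned long)bytes);
        exit(1);
        return(0);      /* not reached */
    }
    /* ... during client startup:  GC_oom_fn = my_oom_handler;  */
#endif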
130:
131:
132: #define GENERAL_MALLOC(lb,k) \
133: (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
134: /* We make the GC_clear_stack_call a tail call, hoping to get more of */
135: /* the stack. */
136:
137: /* Allocate lb bytes of atomic (pointerfree) data */
138: # ifdef __STDC__
139: GC_PTR GC_malloc_atomic(size_t lb)
140: # else
141: GC_PTR GC_malloc_atomic(lb)
142: size_t lb;
143: # endif
144: {
145: register ptr_t op;
146: register ptr_t * opp;
147: register word lw;
148: DCL_LOCK_STATE;
149:
150: if( SMALL_OBJ(lb) ) {
151: # ifdef MERGE_SIZES
152: lw = GC_size_map[lb];
153: # else
154: lw = ALIGNED_WORDS(lb);
155: # endif
156: opp = &(GC_aobjfreelist[lw]);
157: FASTLOCK();
158: if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
159: FASTUNLOCK();
160: return(GENERAL_MALLOC((word)lb, PTRFREE));
161: }
162: /* See above comment on signals. */
163: *opp = obj_link(op);
164: GC_words_allocd += lw;
165: FASTUNLOCK();
166: return((GC_PTR) op);
167: } else {
168: return(GENERAL_MALLOC((word)lb, PTRFREE));
169: }
170: }
171:
172: /* Allocate lb bytes of composite (pointerful) data */
173: # ifdef __STDC__
174: GC_PTR GC_malloc(size_t lb)
175: # else
176: GC_PTR GC_malloc(lb)
177: size_t lb;
178: # endif
179: {
180: register ptr_t op;
181: register ptr_t *opp;
182: register word lw;
183: DCL_LOCK_STATE;
184:
185: if( SMALL_OBJ(lb) ) {
186: # ifdef MERGE_SIZES
187: lw = GC_size_map[lb];
188: # else
189: lw = ALIGNED_WORDS(lb);
190: # endif
191: opp = &(GC_objfreelist[lw]);
192: FASTLOCK();
193: if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
194: FASTUNLOCK();
195: return(GENERAL_MALLOC((word)lb, NORMAL));
196: }
197: /* See above comment on signals. */
198: *opp = obj_link(op);
199: obj_link(op) = 0;
200: GC_words_allocd += lw;
201: FASTUNLOCK();
202: return((GC_PTR) op);
203: } else {
204: return(GENERAL_MALLOC((word)lb, NORMAL));
205: }
206: }
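
/* Illustrative usage sketch (not part of the original source):            */
/* collectable memory is allocated with GC_malloc, and data known to       */
/* contain no pointers with GC_malloc_atomic, which the collector then     */
/* never scans.                                                            */
#if 0 /* usage example only */
    struct cons { struct cons * cdr; GC_PTR car; };

    struct cons * c = (struct cons *)GC_malloc(sizeof(struct cons));
    double * vec = (double *)GC_malloc_atomic(1000 * sizeof(double));
    /* vec is never scanned for pointers, so it must not hold the only  */
    /* reference to any collectable object.                             */
#endif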

# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR malloc(size_t lb)
# else
    GC_PTR malloc(lb)
    size_t lb;
# endif
{
    /* It might help to manually inline the GC_malloc call here.        */
    /* But any decent compiler should reduce the extra procedure call   */
    /* to at most a jump instruction in this case.                      */
#   if defined(I386) && defined(SOLARIS_THREADS)
      /*
       * Thread initialisation can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && SOLARIS_THREADS */
    return(REDIRECT_MALLOC(lb));
}

# ifdef __STDC__
    GC_PTR calloc(size_t n, size_t lb)
# else
    GC_PTR calloc(n, lb)
    size_t n, lb;
# endif
{
    return(REDIRECT_MALLOC(n*lb));
}
# endif /* REDIRECT_MALLOC */
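
/* Note (not in the original source): the product n*lb above can overflow  */
/* for large arguments, in which case calloc would return a block smaller  */
/* than requested.  A defensive variant, sketched here with the            */
/* hypothetical name checked_calloc, would reject such requests:           */
#if 0 /* defensive sketch only */
    GC_PTR checked_calloc(n, lb)
    size_t n, lb;
    {
        if (lb != 0 && n > ((size_t)-1) / lb) return(0);  /* overflow */
        return(REDIRECT_MALLOC(n * lb));
    }
#endif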

GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
          case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old object.       */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;       /* Current size in bytes        */
register word orig_sz;  /* Original sz in bytes         */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
        /* Round it up to the next whole heap block */
        register word descr;

        sz = (sz+HDR_BYTES+HBLKSIZE-1)
             & (~HBLKMASK);
        sz -= HDR_BYTES;
        hhdr -> hb_sz = BYTES_TO_WORDS(sz);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by allochblk. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this         */
                /* gives the client warning of imminent disaster.       */
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
              GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        GC_PTR result =
            GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
          GC_free(p);
#       endif
        return(result);
    }
}
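
/* Illustrative usage sketch (not part of the original source): because  */
/* GC_realloc may move the object, the caller must use the returned      */
/* pointer rather than the old one, exactly as with ANSI realloc.        */
#if 0 /* usage example only */
    int * a = (int *)GC_malloc(16 * sizeof(int));
    int * b = (int *)GC_realloc((GC_PTR)a, 64 * sizeof(int));
    if (b == 0) {
        /* Allocation failed; the original object was not freed. */
    } else {
        a = b;  /* a may now point to a different, larger block */
    }
#endif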

# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(GC_realloc(p, lb));
}
# endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p. */
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
register struct hblk *h;
register hdr *hhdr;
register signed_word sz;
register ptr_t * flh;
register int knd;
register struct obj_kind * ok;
DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ... */
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
       (defined(SOLARIS_THREADS) || defined(LINUX_THREADS))
        /* We have to redirect malloc calls during initialization.      */
        /* Don't try to deallocate that memory.                         */
        if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (sz <= MAXOBJSZ) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        GC_mem_freed += sz;
        /* A signal here can make GC_mem_freed and GC_non_gc_bytes      */
        /* inconsistent.  We claim this is benign.                      */
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        /* It's unnecessary to clear the mark bit.  If the object is    */
        /* reallocated, it doesn't matter.  Otherwise the collector     */
        /* will do it, since it's on a free list.                       */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
    } else {
        DISABLE_SIGNALS();
        LOCK();
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
        UNLOCK();
        ENABLE_SIGNALS();
    }
}
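
/* Illustrative note (not part of the original source): with a collector,  */
/* calling GC_free is optional; it merely returns the object to its free   */
/* list early.  It must only be passed a pointer to the base of an object  */
/* obtained from one of the GC allocators.                                 */
#if 0 /* usage example only */
    char * buf = (char *)GC_malloc_atomic(256);
    /* ... use buf ... */
    GC_free((GC_PTR)buf);  /* optional: reclaim without waiting for GC */
#endif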

# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    void free(GC_PTR p)
# else
    void free(p)
    GC_PTR p;
# endif
{
#   ifndef IGNORE_FREE
        GC_free(p);
#   endif
}
# endif /* REDIRECT_MALLOC */