/* OpenXM_contrib/gc/malloc.c, Revision 1.1.1.2 */
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 7, 1996 4:32 pm PST */

#include <stdio.h>
#include "gc_priv.h"

extern ptr_t GC_clear_stack();	/* in misc.c, behaves like identity */
void GC_extend_size_map();	/* in misc.c */

/* Allocate reclaim list for kind:	*/
/* Return TRUE on success.		*/
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
    struct hblk ** result = (struct hblk **)
    		GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

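/* A minimal sketch (not part of the original file): after a successful	*/
/* call, kind -> ok_reclaim_list is an array indexed by object size in	*/
/* words; each entry heads a list of blocks awaiting reclamation.	*/
#if 0
    if (GC_alloc_reclaim_list(kind)) {
        struct hblk * rlh = kind -> ok_reclaim_list[lw];
        		/* blocks holding lw-word objects of this kind */
    }
#endif
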
/* Allocate lb bytes for an object of kind k.	*/
/* Should not be used directly to allocate	*/
/* objects such as STUBBORN objects that	*/
/* require special handling on allocation.	*/
/* First a version that assumes we already	*/
/* hold the lock:				*/
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
register word lw;
register ptr_t op;
register ptr_t *opp;

    if( SMALL_OBJ(lb) ) {
        register struct obj_kind * kind = GC_obj_kinds + k;
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
          if (lw == 0) lw = 1;
#       endif
        opp = &(kind -> ok_freelist[lw]);
        if( (op = *opp) == 0 ) {
#           ifdef MERGE_SIZES
              if (GC_size_map[lb] == 0) {
                  if (!GC_is_initialized) GC_init_inner();
                  if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                  return(GC_generic_malloc_inner(lb, k));
              }
#           else
              if (!GC_is_initialized) {
                  GC_init_inner();
                  return(GC_generic_malloc_inner(lb, k));
              }
#           endif
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lw, k);
            if (op == 0) goto out;
        }
        /* Here everything is in a consistent state.	*/
        /* We assume the following assignment is	*/
        /* atomic.  If we get aborted			*/
        /* after the assignment, we lose an object,	*/
        /* but that's benign.				*/
        /* Volatile declarations may need to be added	*/
        /* to prevent the compiler from breaking things.*/
        *opp = obj_link(op);
        obj_link(op) = 0;
    } else {
        register struct hblk * h;
        register word n_blocks = divHBLKSZ(ADD_SLOP(lb)
                                           + HDR_BYTES + HBLKSIZE-1);

        if (!GC_is_initialized) GC_init_inner();
        /* Do our share of marking work */
        if(GC_incremental && !GC_dont_gc)
            GC_collect_a_little_inner((int)n_blocks);
        lw = ROUNDED_UP_WORDS(lb);
        h = GC_allochblk(lw, k, 0);
#       ifdef USE_MUNMAP
          if (0 == h) {
              GC_merge_unmapped();
              h = GC_allochblk(lw, k, 0);
          }
#       endif
        while (0 == h && GC_collect_or_expand(n_blocks, FALSE)) {
            h = GC_allochblk(lw, k, 0);
        }
        if (h == 0) {
            op = 0;
        } else {
            op = (ptr_t) (h -> hb_body);
            GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
        }
    }
    GC_words_allocd += lw;

out:
    return((ptr_t)op);
}

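/* A minimal usage sketch (not original code): internal callers must	*/
/* already hold the allocation lock, exactly as GC_generic_malloc	*/
/* below arranges before calling in:					*/
#if 0
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_inner(lb, NORMAL);
    UNLOCK();
    ENABLE_SIGNALS();
#endif
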
ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
    ptr_t result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = GC_generic_malloc_inner(lb, k);
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}

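/* A minimal sketch (not original code): on failure GC_generic_malloc	*/
/* reports through GC_oom_fn, so a client can install its own handler;	*/
/* the size_t parameter here is an assumption based on the call above.	*/
#if 0
static GC_PTR my_oom_fn(size_t bytes_requested)
{
    GC_err_printf0("Heap exhausted\n");
    return(0);	/* makes the failing allocation return 0 */
}
/* ... GC_oom_fn = my_oom_fn; ... */
#endif
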
#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make the GC_clear_stack call a tail call, hoping to get more of	*/
/* the stack.								*/

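/* For illustration (not original text), a call such as		*/
/*	GENERAL_MALLOC(16, NORMAL)					*/
/* expands to								*/
/*	(GC_PTR)GC_clear_stack(GC_generic_malloc((word)16, NORMAL))	*/
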
/* Allocate lb bytes of atomic (pointer-free) data. */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic(size_t lb)
# else
    GC_PTR GC_malloc_atomic(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t * opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_aobjfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        /* See above comment on signals. */
        *opp = obj_link(op);
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}

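/* A minimal usage sketch (not part of the original file): atomic	*/
/* objects are never scanned for pointers, so they suit leaf data	*/
/* such as character buffers.  The function name is hypothetical.	*/
#if 0
static char * example_atomic_buffer(size_t n)
{
    char *buf = (char *)GC_malloc_atomic(n);	/* contents not scanned */
    /* 0 is returned only if GC_oom_fn also returned 0. */
    return buf;
}
#endif
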
/* Allocate lb bytes of composite (pointerful) data. */
# ifdef __STDC__
    GC_PTR GC_malloc(size_t lb)
# else
    GC_PTR GC_malloc(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_objfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        /* See above comment on signals. */
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, NORMAL));
    }
}

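/* A minimal usage sketch (not part of the original file): NORMAL	*/
/* objects are scanned for pointers, so references stored in them	*/
/* keep their referents alive.  The type and name are hypothetical.	*/
#if 0
struct example_node { struct example_node *next; word datum; };

static struct example_node * example_cons(struct example_node *tail, word d)
{
    struct example_node *n = (struct example_node *)
    			GC_malloc(sizeof(struct example_node));
    if (n != 0) { n -> next = tail; n -> datum = d; }
    return n;	/* n -> next is traced; tail stays reachable */
}
#endif
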
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR malloc(size_t lb)
# else
    GC_PTR malloc(lb)
    size_t lb;
# endif
{
    /* It might help to manually inline the GC_malloc call here.	*/
    /* But any decent compiler should reduce the extra procedure call	*/
    /* to at most a jump instruction in this case.			*/
#   if defined(I386) && defined(SOLARIS_THREADS)
      /*
       * Thread initialisation can call malloc before we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && SOLARIS_THREADS */
    return(REDIRECT_MALLOC(lb));
}

# ifdef __STDC__
    GC_PTR calloc(size_t n, size_t lb)
# else
    GC_PTR calloc(n, lb)
    size_t n, lb;
# endif
{
    return(REDIRECT_MALLOC(n*lb));
}
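
/* The multiplication n*lb above can overflow for large arguments.	*/
/* A defensive variant (a sketch, not part of this implementation)	*/
/* would reject such requests first:					*/
#if 0
    if (n != 0 && lb > ((size_t)-1) / n) return(0);	/* product overflows */
    return(REDIRECT_MALLOC(n*lb));
#endif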
# endif /* REDIRECT_MALLOC */

GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
          case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least	*/
/* lb bytes.  The object may be (and quite likely will be) moved.	*/
/* The kind (e.g. atomic) is the same as that of the old object.	*/
/* Shrinking of large blocks is not implemented well.			*/
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;	/* Current size in bytes	*/
register word orig_sz;	/* Original sz in bytes		*/
int obj_kind;

    if (p == 0) return(GC_malloc(lb));	/* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
        /* Round it up to the next whole heap block */
        register word descr;

        sz = (sz+HDR_BYTES+HBLKSIZE-1) & (~HBLKMASK);
        sz -= HDR_BYTES;
        hhdr -> hb_sz = BYTES_TO_WORDS(sz);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by allochblk. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer	*/
                /* tracing.						*/
                /* Safe for stubborn objects.				*/
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this	*/
                /* gives the client warning of imminent disaster.	*/
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
              GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        GC_PTR result =
            GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
          GC_free(p);
#       endif
        return(result);
    }
}

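/* A minimal usage sketch (not part of the original file): GC_realloc	*/
/* may move the object, and on failure it returns 0 while leaving the	*/
/* old object intact.  The function name is hypothetical.		*/
#if 0
static word * example_grow(word *arr, size_t new_count)
{
    word *bigger = (word *)GC_realloc((GC_PTR)arr,
                                      new_count * sizeof(word));
    if (bigger == 0) return arr;	/* old array still valid */
    return bigger;			/* possibly a new address */
}
#endif
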
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(GC_realloc(p, lb));
}
# endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p. */
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
register struct hblk *h;
register hdr *hhdr;
register signed_word sz;
register ptr_t * flh;
register int knd;
register struct obj_kind * ok;
DCL_LOCK_STATE;

    if (p == 0) return;
    	/* Required by ANSI.  It's not my fault ... */
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
       (defined(SOLARIS_THREADS) || defined(LINUX_THREADS))
      /* We have to redirect malloc calls during initialization.	*/
      /* Don't try to deallocate that memory.				*/
      if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (sz <= MAXOBJSZ) {
#       ifdef THREADS
          DISABLE_SIGNALS();
          LOCK();
#       endif
        GC_mem_freed += sz;
        /* A signal here can make GC_mem_freed and GC_non_gc_bytes	*/
        /* inconsistent.  We claim this is benign.			*/
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        /* It's unnecessary to clear the mark bit.  If the object is	*/
        /* reallocated, it doesn't matter.  Otherwise the collector	*/
        /* will do it, since it's on a free list.			*/
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
#       ifdef THREADS
          UNLOCK();
          ENABLE_SIGNALS();
#       endif
    } else {
        DISABLE_SIGNALS();
        LOCK();
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
        UNLOCK();
        ENABLE_SIGNALS();
    }
}

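/* A minimal usage sketch (not part of the original file): explicit	*/
/* deallocation is optional under the collector, but it returns the	*/
/* object to its free list immediately, which can curb heap growth in	*/
/* allocation-heavy loops.  The function name is hypothetical.		*/
#if 0
static void example_scratch(void)
{
    word *tmp = (word *)GC_malloc(64 * sizeof(word));
    /* ... use tmp as scratch space ... */
    GC_free((GC_PTR)tmp);	/* p must be the base of the object */
}
#endif
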
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    void free(GC_PTR p)
# else
    void free(p)
    GC_PTR p;
# endif
{
#   ifndef IGNORE_FREE
      GC_free(p);
#   endif
}
# endif /* REDIRECT_MALLOC */