/* OpenXM_contrib2/asir2000/gc5.3/malloc.c, Revision 1.1.1.1 */
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 7, 1996 4:32 pm PST */

#include <stdio.h>
#include "gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */

/* Allocate reclaim list for kind: */
/* Return TRUE on success */
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
    struct hblk ** result = (struct hblk **)
        GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate lb bytes for an object of kind k. */
/* Should not be used directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold the lock: */
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
    register word lw;
    register ptr_t op;
    register ptr_t *opp;

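    /* Small-object case: serve the request from the free list */
    /* for this size and kind, refilling the list if it is empty. */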
    if( SMALL_OBJ(lb) ) {
        register struct obj_kind * kind = GC_obj_kinds + k;
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
          if (lw == 0) lw = 1;
#       endif
        opp = &(kind -> ok_freelist[lw]);
        if( (op = *opp) == 0 ) {
#           ifdef MERGE_SIZES
              if (GC_size_map[lb] == 0) {
                  if (!GC_is_initialized) GC_init_inner();
                  if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                  return(GC_generic_malloc_inner(lb, k));
              }
#           else
              if (!GC_is_initialized) {
                  GC_init_inner();
                  return(GC_generic_malloc_inner(lb, k));
              }
#           endif
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lw, k);
            if (op == 0) goto out;
        }
        /* Here everything is in a consistent state. */
        /* We assume the following assignment is */
        /* atomic.  If we get aborted */
        /* after the assignment, we lose an object, */
        /* but that's benign. */
        /* Volatile declarations may need to be added */
        /* to prevent the compiler from breaking things.*/
        /* If we only execute the second of the */
        /* following assignments, we lose the free */
        /* list, but that should still be OK, at least */
        /* for garbage collected memory. */
        *opp = obj_link(op);
        obj_link(op) = 0;
    } else {
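        /* Large-object case: round up to whole heap blocks and */
        /* allocate directly with GC_allochblk. */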
        register struct hblk * h;
        register word n_blocks = divHBLKSZ(ADD_SLOP(lb)
                                           + HDR_BYTES + HBLKSIZE-1);

        if (!GC_is_initialized) GC_init_inner();
        /* Do our share of marking work */
        if (GC_incremental && !GC_dont_gc)
            GC_collect_a_little_inner((int)n_blocks);
        lw = ROUNDED_UP_WORDS(lb);
        h = GC_allochblk(lw, k, 0);
#       ifdef USE_MUNMAP
          if (0 == h) {
              GC_merge_unmapped();
              h = GC_allochblk(lw, k, 0);
          }
#       endif
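        /* On failure, try to reclaim or grow the heap and retry. */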
        while (0 == h && GC_collect_or_expand(n_blocks, FALSE)) {
            h = GC_allochblk(lw, k, 0);
        }
        if (h == 0) {
            op = 0;
        } else {
            op = (ptr_t) (h -> hb_body);
            GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
        }
    }
    GC_words_allocd += lw;

out:
    return((ptr_t)op);
}

ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
    ptr_t result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = GC_generic_malloc_inner(lb, k);
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}


#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make the GC_clear_stack call a tail call, hoping to get more of */
/* the stack. */

/* Allocate lb bytes of atomic (pointerfree) data */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic(size_t lb)
# else
    GC_PTR GC_malloc_atomic(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t * opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_aobjfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        /* See above comment on signals. */
        *opp = obj_link(op);
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}

/* Allocate lb bytes of composite (pointerful) data */
# ifdef __STDC__
    GC_PTR GC_malloc(size_t lb)
# else
    GC_PTR GC_malloc(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_objfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        /* See above comment on signals. */
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
    } else {
        return(GENERAL_MALLOC((word)lb, NORMAL));
    }
}
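
/*
 * A hedged usage sketch (client code, not part of this file).  GC_malloc
 * returns collectable memory that the collector scans for pointers;
 * GC_malloc_atomic memory is never scanned, so it suits pointer-free data:
 *
 *     struct node { struct node *next; };
 *     struct node *n = (struct node *)GC_malloc(sizeof(struct node));
 *     char *buf = (char *)GC_malloc_atomic(4096);
 */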

# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR malloc(size_t lb)
# else
    GC_PTR malloc(lb)
    size_t lb;
# endif
{
    /* It might help to manually inline the GC_malloc call here. */
    /* But any decent compiler should reduce the extra procedure call */
    /* to at most a jump instruction in this case. */
#   if defined(I386) && defined(SOLARIS_THREADS)
      /*
       * Thread initialization can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && SOLARIS_THREADS */
    return(REDIRECT_MALLOC(lb));
}

# ifdef __STDC__
    GC_PTR calloc(size_t n, size_t lb)
# else
    GC_PTR calloc(n, lb)
    size_t n, lb;
# endif
{
    /* Guard against overflow in the n*lb multiplication. */
    if (lb != 0 && n > ((size_t)-1) / lb) return(0);
    return(REDIRECT_MALLOC(n*lb));
}
# endif /* REDIRECT_MALLOC */
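
/*
 * REDIRECT_MALLOC is normally defined on the compiler command line, e.g.
 * -DREDIRECT_MALLOC=GC_malloc or -DREDIRECT_MALLOC=GC_malloc_uncollectable,
 * so the malloc/calloc entry points above route the program's ordinary
 * allocations through the collector.  (A hedged note: the exact setting
 * depends on the build configuration.)
 */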

GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#     ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#     endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least */
/* lb bytes.  The object may be (and quite likely will be) moved. */
/* The kind (e.g. atomic) is the same as that of the old object. */
/* Shrinking of large blocks is not implemented well. */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    register struct hblk * h;
    register hdr * hhdr;
    register word sz;      /* Current size in bytes */
    register word orig_sz; /* Original sz in bytes */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
        /* Round it up to the next whole heap block */
        register word descr;

        sz = (sz+HDR_BYTES+HBLKSIZE-1) & (~HBLKMASK);
        sz -= HDR_BYTES;
        hhdr -> hb_sz = BYTES_TO_WORDS(sz);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by allochblk. */
    }
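    /* If the request fits in the current block and wastes less than */
    /* half of it, adjust in place; otherwise copy into a freshly */
    /* allocated object of the right size. */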
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing. */
                /* Safe for stubborn objects. */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
            /* Could also return original object.  But this */
            /* gives the client warning of imminent disaster. */
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
              GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        GC_PTR result =
            GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
          GC_free(p);
#       endif
        return(result);
    }
}
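
/*
 * A hedged usage sketch (client code, not part of this file): GC_realloc
 * follows realloc conventions, so a null pointer argument degenerates to
 * an allocation, and a growing call may move the object:
 *
 *     int *a = (int *)GC_realloc(0, 10 * sizeof(int));
 *     a = (int *)GC_realloc(a, 20 * sizeof(int));
 */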

# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(GC_realloc(p, lb));
}
# endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p. */
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
    /* Required by ANSI.  It's not my fault ... */
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
       (defined(SOLARIS_THREADS) || defined(LINUX_THREADS))
      /* We have to redirect malloc calls during initialization. */
      /* Don't try to deallocate that memory. */
      if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
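    /* Small objects go back on the appropriate free list; */
    /* large objects give back their whole heap block. */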
    if (sz <= MAXOBJSZ) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        GC_mem_freed += sz;
        /* A signal here can make GC_mem_freed and GC_non_gc_bytes */
        /* inconsistent.  We claim this is benign. */
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        /* It's unnecessary to clear the mark bit.  If the */
        /* object is reallocated, it doesn't matter.  Otherwise */
        /* the collector will do it, since it's on a free list. */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
    } else {
        DISABLE_SIGNALS();
        LOCK();
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
        UNLOCK();
        ENABLE_SIGNALS();
    }
}
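
/*
 * A hedged usage sketch (client code, not part of this file): explicit
 * deallocation is optional for collectable objects, but UNCOLLECTABLE
 * objects are reclaimed only this way:
 *
 *     GC_PTR p = GC_malloc_uncollectable(64);
 *     ...
 *     GC_free(p);
 */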

# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    void free(GC_PTR p)
# else
    void free(p)
    GC_PTR p;
# endif
{
#   ifndef IGNORE_FREE
        GC_free(p);
#   endif
}
# endif /* REDIRECT_MALLOC */