Annotation of OpenXM_contrib2/asir2000/gc/malloc.c, Revision 1.5
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 7, 1996 4:32 pm PST */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();	/* in misc.c, behaves like identity */
void GC_extend_size_map();	/* in misc.c */

/* Allocate reclaim list for kind:	*/
/* Return TRUE on success		*/
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
    struct hblk ** result = (struct hblk **)
		GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate a large block of size lw words.	*/
/* The block is not cleared.			*/
/* Flags is 0 or IGNORE_OFF_PAGE.		*/
ptr_t GC_alloc_large(lw, k, flags)
word lw;
int k;
unsigned flags;
{
    struct hblk * h;
    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    ptr_t result;

    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc)
	GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lw, k, flags);
#   ifdef USE_MUNMAP
	if (0 == h) {
	    GC_merge_unmapped();
	    h = GC_allochblk(lw, k, flags);
	}
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
	h = GC_allochblk(lw, k, flags);
    }
    if (h == 0) {
	result = 0;
    } else {
	int total_words = BYTES_TO_WORDS(n_blocks * HBLKSIZE);
	if (n_blocks > 1) {
	    GC_large_allocd_bytes += n_blocks * HBLKSIZE;
	    if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
		GC_max_large_allocd_bytes = GC_large_allocd_bytes;
	}
	result = (ptr_t) (h -> hb_body);
	GC_words_wasted += total_words - lw;
    }
    return result;
}


/* Allocate a large block of size lw words.  Clear if appropriate.	*/
ptr_t GC_alloc_large_and_clear(lw, k, flags)
word lw;
int k;
unsigned flags;
{
    ptr_t result = GC_alloc_large(lw, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
	/* Clear the whole block, in case of GC_realloc call. */
	BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

/* Allocate lb bytes for an object of kind k.	*/
/* Should not be used directly to allocate	*/
/* objects such as STUBBORN objects that	*/
/* require special handling on allocation.	*/
/* First a version that assumes we already	*/
/* hold lock:					*/
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
register word lw;
register ptr_t op;
register ptr_t *opp;

    if( SMALL_OBJ(lb) ) {
	register struct obj_kind * kind = GC_obj_kinds + k;
#       ifdef MERGE_SIZES
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
	  if (lw == 0) lw = MIN_WORDS;
#       endif
	opp = &(kind -> ok_freelist[lw]);
	if( (op = *opp) == 0 ) {
#	    ifdef MERGE_SIZES
	      if (GC_size_map[lb] == 0) {
		if (!GC_is_initialized) GC_init_inner();
		if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
		return(GC_generic_malloc_inner(lb, k));
	      }
#	    else
	      if (!GC_is_initialized) {
		GC_init_inner();
		return(GC_generic_malloc_inner(lb, k));
	      }
#	    endif
	    if (kind -> ok_reclaim_list == 0) {
		if (!GC_alloc_reclaim_list(kind)) goto out;
	    }
	    op = GC_allocobj(lw, k);
	    if (op == 0) goto out;
	}
	/* Here everything is in a consistent state.	*/
	/* We assume the following assignment is	*/
	/* atomic.  If we get aborted			*/
	/* after the assignment, we lose an object,	*/
	/* but that's benign.				*/
	/* Volatile declarations may need to be added	*/
	/* to prevent the compiler from breaking things.*/
	/* If we only execute the second of the		*/
	/* following assignments, we lose the free	*/
	/* list, but that should still be OK, at least	*/
	/* for garbage collected memory.		*/
	*opp = obj_link(op);
	obj_link(op) = 0;
    } else {
	lw = ROUNDED_UP_WORDS(lb);
	op = (ptr_t)GC_alloc_large_and_clear(lw, k, 0);
    }
    GC_words_allocd += lw;

out:
    return op;
}


/* Allocate a composite object of size lb bytes.  The caller guarantees  */
/* that pointers past the first page are not relevant.  Caller holds	  */
/* allocation lock.							  */
ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register word lw;
    ptr_t op;

    if (lb <= HBLKSIZE)
	return(GC_generic_malloc_inner((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    op = (ptr_t)GC_alloc_large_and_clear(lw, k, IGNORE_OFF_PAGE);
    GC_words_allocd += lw;
    return op;
}
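
/* Note (not in the original file, an editorial assumption): the	*/
/* IGNORE_OFF_PAGE flag here appears to back the public		*/
/* GC_malloc_ignore_off_page interface, where the client promises to	*/
/* keep a pointer to near the beginning of the object, so the		*/
/* collector may disregard interior pointers further into it.		*/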

ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
    ptr_t result;
    DCL_LOCK_STATE;

    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    if (SMALL_OBJ(lb)) {
	DISABLE_SIGNALS();
	LOCK();
	result = GC_generic_malloc_inner((word)lb, k);
	UNLOCK();
	ENABLE_SIGNALS();
    } else {
	word lw;
	word n_blocks;
	GC_bool init;
	lw = ROUNDED_UP_WORDS(lb);
	n_blocks = OBJ_SZ_TO_BLOCKS(lw);
	init = GC_obj_kinds[k].ok_init;
	DISABLE_SIGNALS();
	LOCK();
	result = (ptr_t)GC_alloc_large(lw, k, 0);
	if (0 != result) {
	    if (GC_debugging_started) {
		BZERO(result, n_blocks * HBLKSIZE);
	    } else {
#               ifdef THREADS
		    /* Clear any memory that might be used for GC descriptors */
		    /* before we release the lock.			      */
		    ((word *)result)[0] = 0;
		    ((word *)result)[1] = 0;
		    ((word *)result)[lw-1] = 0;
		    ((word *)result)[lw-2] = 0;
#		endif
	    }
	}
	GC_words_allocd += lw;
	UNLOCK();
	ENABLE_SIGNALS();
	if (init && !GC_debugging_started && 0 != result) {
	    BZERO(result, n_blocks * HBLKSIZE);
	}
    }
    if (0 == result) {
	return((*GC_oom_fn)(lb));
    } else {
	return(result);
    }
}
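
/* Illustrative sketch (not part of the original file): the public	*/
/* allocators below are thin wrappers around GC_generic_malloc that	*/
/* fix the object kind.  Using the kind macros from gc_priv.h, one	*/
/* would expect something like:						*/
#if 0
static void kind_example(void)
{
    ptr_t p = GC_generic_malloc((word)64, NORMAL);	/* scanned for pointers */
    ptr_t q = GC_generic_malloc((word)64, PTRFREE);	/* never scanned	*/
}
#endif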

#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make the GC_clear_stack call a tail call, hoping to get more of	*/
/* the stack.								*/

/* Allocate lb bytes of atomic (pointerfree) data */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic(size_t lb)
# else
    GC_PTR GC_malloc_atomic(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t * opp;
register word lw;
DCL_LOCK_STATE;

    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
#       endif
	opp = &(GC_aobjfreelist[lw]);
	FASTLOCK();
	if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
	    FASTUNLOCK();
	    return(GENERAL_MALLOC((word)lb, PTRFREE));
	}
	/* See above comment on signals.	*/
	*opp = obj_link(op);
	GC_words_allocd += lw;
	FASTUNLOCK();
	return((GC_PTR) op);
    } else {
	return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}

/* Allocate lb bytes of composite (pointerful) data */
# ifdef __STDC__
    GC_PTR GC_malloc(size_t lb)
# else
    GC_PTR GC_malloc(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
#       endif
	opp = &(GC_objfreelist[lw]);
	FASTLOCK();
	if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
	    FASTUNLOCK();
	    return(GENERAL_MALLOC((word)lb, NORMAL));
	}
	/* See above comment on signals.	*/
	*opp = obj_link(op);
	obj_link(op) = 0;
	GC_words_allocd += lw;
	FASTUNLOCK();
	return((GC_PTR) op);
    } else {
	return(GENERAL_MALLOC((word)lb, NORMAL));
    }
}
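
/* Illustrative usage (an editorial sketch, not part of this file):	*/
/* client code includes the public header gc.h and calls GC_malloc for	*/
/* pointerful data and GC_malloc_atomic for data known to hold no	*/
/* pointers; with a conservative collector, neither object ever needs	*/
/* an explicit free.							*/
#if 0
#include "gc.h"

struct node { struct node *next; int value; };

/* Prepend a value to a GC-managed linked list. */
static struct node * cons(struct node *next, int value)
{
    struct node *n = (struct node *)GC_malloc(sizeof(struct node));
    n -> next = next;	/* pointer fields inside n are scanned by the GC */
    n -> value = value;
    return n;
}

/* Pointer-free payload: the collector never scans its contents. */
static char * make_buffer(void) { return (char *)GC_malloc_atomic(1024); }
#endif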

# ifdef REDIRECT_MALLOC
# ifdef __STDC__
    GC_PTR malloc(size_t lb)
# else
    GC_PTR malloc(lb)
    size_t lb;
# endif
{
    /* It might help to manually inline the GC_malloc call here.	*/
    /* But any decent compiler should reduce the extra procedure call	*/
    /* to at most a jump instruction in this case.			*/
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /*
       * Thread initialisation can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && GC_SOLARIS_THREADS */
    return((GC_PTR)REDIRECT_MALLOC(lb));
}

# ifdef __STDC__
    GC_PTR calloc(size_t n, size_t lb)
# else
    GC_PTR calloc(n, lb)
    size_t n, lb;
# endif
{
    /* Guard against overflow in n*lb, which the original multiplied	*/
    /* unchecked; returning 0 matches calloc's failure convention.	*/
    if (lb != 0 && n > ((size_t)-1) / lb) return(0);
    return((GC_PTR)REDIRECT_MALLOC(n*lb));
}

#ifndef strdup
# include <string.h>
# ifdef __STDC__
    char *strdup(const char *s)
# else
    char *strdup(s)
    char *s;
# endif
{
    size_t len = strlen(s) + 1;		/* includes the terminating NUL */
    char * result = ((char *)REDIRECT_MALLOC(len));
    BCOPY(s, result, len);
    return result;
}
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it.	 */
/* This seems to be true on most Linux systems.				 */

# endif /* REDIRECT_MALLOC */
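
/* Note (not in the original file): the overrides above take effect	*/
/* when the collector is built with REDIRECT_MALLOC defined, typically	*/
/* -DREDIRECT_MALLOC=GC_malloc or GC_malloc_uncollectable, so that	*/
/* ordinary malloc/calloc/strdup calls in the application are served	*/
/* by the collector and calling free becomes optional.			*/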

/* Explicitly deallocate an object p.	*/
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
	/* Required by ANSI.  It's not my fault ...	*/
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
	(defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
	 || defined(__MINGW32__)) /* Should this be MSWIN32 in general? */
	/* For Solaris, we have to redirect malloc calls during	*/
	/* initialization.  For the others, this seems to happen	*/
	/* implicitly.							*/
	/* Don't try to deallocate that memory.				*/
	if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (EXPECT((sz <= MAXOBJSZ), 1)) {
#	ifdef THREADS
	    DISABLE_SIGNALS();
	    LOCK();
#	endif
	GC_mem_freed += sz;
	/* A signal here can make GC_mem_freed and GC_non_gc_bytes	*/
	/* inconsistent.  We claim this is benign.			*/
	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
		/* It's unnecessary to clear the mark bit.  If the	*/
		/* object is reallocated, it doesn't matter.  O.w. the	*/
		/* collector will do it, since it's on a free list.	*/
	if (ok -> ok_init) {
	    BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
	}
	flh = &(ok -> ok_freelist[sz]);
	obj_link(p) = *flh;
	*flh = (ptr_t)p;
#	ifdef THREADS
	    UNLOCK();
	    ENABLE_SIGNALS();
#	endif
    } else {
	DISABLE_SIGNALS();
	LOCK();
	GC_mem_freed += sz;
	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
	GC_freehblk(h);
	UNLOCK();
	ENABLE_SIGNALS();
    }
}
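
/* Illustrative sketch (not part of the original file): explicit	*/
/* deallocation is never required with the collector, but GC_free can	*/
/* recycle an object sooner when the caller knows it is dead.		*/
#if 0
static void free_example(void)
{
    char *tmp = (char *)GC_malloc_atomic(256);
    /* ... use tmp ... */
    GC_free(tmp);	/* safe only if no other references to tmp survive */
}
#endif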

/* Explicitly deallocate an object p when we already hold lock.	*/
/* Only used for internally allocated objects, so we can take some	*/
/* shortcuts.								*/
#ifdef THREADS
void GC_free_inner(GC_PTR p)
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (sz <= MAXOBJSZ) {
	GC_mem_freed += sz;
	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
	if (ok -> ok_init) {
	    BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
	}
	flh = &(ok -> ok_freelist[sz]);
	obj_link(p) = *flh;
	*flh = (ptr_t)p;
    } else {
	GC_mem_freed += sz;
	if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
	GC_freehblk(h);
    }
}
#endif /* THREADS */

# ifdef REDIRECT_MALLOC
#   ifdef __STDC__
      void free(GC_PTR p)
#   else
      void free(p)
      GC_PTR p;
#   endif
  {
#   ifndef IGNORE_FREE
      GC_free(p);
#   endif
  }
# endif /* REDIRECT_MALLOC */