
Annotation of OpenXM_contrib/gc/mallocx.c, Revision 1.1.1.1

1.1       maekawa     1: /*
                      2:  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
                      3:  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
                      4:  * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
                      5:  *
                      6:  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
                      7:  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
                      8:  *
                      9:  * Permission is hereby granted to use or copy this program
                     10:  * for any purpose,  provided the above notices are retained on all copies.
                     11:  * Permission to modify the code and to distribute modified code is granted,
                     12:  * provided the above notices are retained, and a notice that the code was
                     13:  * modified is included with the above copyright notice.
                     14:  */
                     15:
                     16: /*
                     17:  * These are extra allocation routines which are likely to be less
                     18:  * frequently used than those in malloc.c.  They are separate in the
                     19:  * hope that the .o file will be excluded from statically linked
                     20:  * executables.  We should probably break this up further.
                     21:  */
                     22:
                     23: #include <stdio.h>
                     24: #include "gc_priv.h"
                     25:
                     26: extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
                     27: void GC_extend_size_map();      /* in misc.c. */
                     28: GC_bool GC_alloc_reclaim_list();       /* in malloc.c */
                     29:
                     30: /* Some externally visible but unadvertised variables to allow access to */
                     31: /* free lists from inlined allocators without including gc_priv.h       */
                     32: /* or introducing dependencies on internal data structure layouts.      */
                     33: ptr_t * CONST GC_objfreelist_ptr = GC_objfreelist;
                     34: ptr_t * CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
                     35: ptr_t * CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
                     36: # ifdef ATOMIC_UNCOLLECTABLE
                     37:     ptr_t * CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
                     38: # endif
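
/* Illustrative sketch (not part of the original file): one way a      */
/* client source file that does not include gc_priv.h could use the    */
/* pointers exported above.  Free lists are indexed by object size in  */
/* words, and free objects are linked through their first word.  The   */
/* fast path below takes no lock, so it assumes a single-threaded      */
/* client; GC_incr_words_allocd() (defined further down in mallocx.c)  */
/* keeps the collector's allocation statistics consistent.  The type   */
/* char ** stands in for ptr_t * purely for illustration, and a word   */
/* is assumed to be pointer-sized.                                     */

#include <stddef.h>
#include "gc.h"                         /* public interface; GC_malloc */

extern char ** GC_objfreelist_ptr;      /* exported above               */
extern void GC_incr_words_allocd();     /* exported below in mallocx.c  */

/* Allocate one NORMAL (pointer-containing) object of lw words.        */
static void * example_inline_malloc_words(size_t lw)
{
    char *op = GC_objfreelist_ptr[lw];

    if (op != 0) {
        /* Pop the object; the next free object is linked through      */
        /* its first word.                                             */
        GC_objfreelist_ptr[lw] = *(char **)op;
        *(char **)op = 0;
        GC_incr_words_allocd(lw);
        return((void *)op);
    }
    /* Free list empty: fall back to the ordinary entry point, which   */
    /* replenishes the free list as a side effect.                     */
    return(GC_malloc(lw * sizeof(char *)));
}
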
                     39:
                     40: /* Allocate a composite object of size n bytes.  The caller guarantees  */
                     41: /* that pointers past the first page are not relevant.  Caller holds    */
                     42: /* allocation lock.                                                     */
                     43: ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
                     44: register size_t lb;
                     45: register int k;
                     46: {
                     47:     register struct hblk * h;
                     48:     register word n_blocks;
                     49:     register word lw;
                     50:     register ptr_t op;
                     51:
                     52:     if (lb <= HBLKSIZE)
                     53:         return(GC_generic_malloc_inner((word)lb, k));
                     54:     n_blocks = divHBLKSZ(ADD_SLOP(lb) + HDR_BYTES + HBLKSIZE-1);
                     55:     if (!GC_is_initialized) GC_init_inner();
                     56:     /* Do our share of marking work */
                     57:     if(GC_incremental && !GC_dont_gc)
                     58:         GC_collect_a_little_inner((int)n_blocks);
                     59:     lw = ROUNDED_UP_WORDS(lb);
                     60:     while ((h = GC_allochblk(lw, k, IGNORE_OFF_PAGE)) == 0
                     61:            && GC_collect_or_expand(n_blocks, TRUE));
                     62:     if (h == 0) {
                     63:         op = 0;
                     64:     } else {
                     65:         op = (ptr_t) (h -> hb_body);
                     66:         GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
                     67:     }
                     68:     GC_words_allocd += lw;
                     69:     return((ptr_t)op);
                     70: }
                     71:
                     72: ptr_t GC_generic_malloc_ignore_off_page(lb, k)
                     73: register size_t lb;
                     74: register int k;
                     75: {
                     76:     register ptr_t result;
                     77:     DCL_LOCK_STATE;
                     78:
                     79:     GC_INVOKE_FINALIZERS();
                     80:     DISABLE_SIGNALS();
                     81:     LOCK();
                     82:     result = GC_generic_malloc_inner_ignore_off_page(lb,k);
                     83:     UNLOCK();
                     84:     ENABLE_SIGNALS();
                     85:     if (0 == result) {
                     86:         return((*GC_oom_fn)(lb));
                     87:     } else {
                     88:         return(result);
                     89:     }
                     90: }
                     91:
                     92: # if defined(__STDC__) || defined(__cplusplus)
                     93:   void * GC_malloc_ignore_off_page(size_t lb)
                     94: # else
                     95:   char * GC_malloc_ignore_off_page(lb)
                     96:   register size_t lb;
                     97: # endif
                     98: {
                     99:     return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
                    100: }
                    101:
                    102: # if defined(__STDC__) || defined(__cplusplus)
                    103:   void * GC_malloc_atomic_ignore_off_page(size_t lb)
                    104: # else
                    105:   char * GC_malloc_atomic_ignore_off_page(lb)
                    106:   register size_t lb;
                    107: # endif
                    108: {
                    109:     return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
                    110: }
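
/* Illustrative sketch (not part of the original file): typical client */
/* use of the _ignore_off_page entry points above.  Per the contract   */
/* on GC_generic_malloc_inner_ignore_off_page, the caller promises     */
/* that only pointers into the first page of the object matter, i.e.   */
/* it always retains a pointer to (or near) the start.  That makes     */
/* these calls appropriate for large blocks referenced only through    */
/* their base pointer.  The buffer size below is hypothetical.         */

#include "gc.h"

#define EXAMPLE_BUF_BYTES (1024 * 1024)         /* hypothetical size   */

static double * example_new_big_array(void)
{
    /* The element type contains no pointers, so the atomic variant    */
    /* is used and the contents will not be scanned.                   */
    double *base = (double *)
        GC_malloc_atomic_ignore_off_page(EXAMPLE_BUF_BYTES);

    /* Only 'base', a pointer into the first page, is kept around.     */
    return(base);
}
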
                    111:
                    112: /* Increment GC_words_allocd from code that doesn't have direct access         */
                    113: /* to GC_arrays.                                                       */
                    114: # ifdef __STDC__
                    115: void GC_incr_words_allocd(size_t n)
                    116: {
                    117:     GC_words_allocd += n;
                    118: }
                    119:
                    120: /* The same for GC_mem_freed.                          */
                    121: void GC_incr_mem_freed(size_t n)
                    122: {
                    123:     GC_mem_freed += n;
                    124: }
                    125: # endif /* __STDC__ */
                    126:
                    127: /* Analogous to the above, but assumes a small object size, and        */
                    128: /* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
                    129: #ifdef __STDC__
                    130:      ptr_t GC_generic_malloc_words_small(size_t lw, int k)
                    131: #else
                    132:      ptr_t GC_generic_malloc_words_small(lw, k)
                    133:      register word lw;
                    134:      register int k;
                    135: #endif
                    136: {
                    137: register ptr_t op;
                    138: register ptr_t *opp;
                    139: register struct obj_kind * kind = GC_obj_kinds + k;
                    140: DCL_LOCK_STATE;
                    141:
                    142:     GC_INVOKE_FINALIZERS();
                    143:     DISABLE_SIGNALS();
                    144:     LOCK();
                    145:     opp = &(kind -> ok_freelist[lw]);
                    146:     if( (op = *opp) == 0 ) {
                    147:         if (!GC_is_initialized) {
                    148:             GC_init_inner();
                    149:         }
                    150:        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
                    151:            op = GC_clear_stack(GC_allocobj((word)lw, k));
                    152:        }
                    153:        if (op == 0) {
                    154:            UNLOCK();
                    155:            ENABLE_SIGNALS();
                    156:            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
                    157:        }
                    158:     }
                    159:     *opp = obj_link(op);
                    160:     obj_link(op) = 0;
                    161:     GC_words_allocd += lw;
                    162:     UNLOCK();
                    163:     ENABLE_SIGNALS();
                    164:     return((ptr_t)op);
                    165: }
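
/* Illustrative sketch (not part of the original file): direct use of  */
/* GC_generic_malloc_words_small for a fixed-size node type.  The      */
/* size argument is a word count, not a byte count, and GC_size_map    */
/* is bypassed, so the caller rounds up itself (the collector's own    */
/* callers use ALIGNED_WORDS, which may reserve an extra slop byte in  */
/* some configurations).  The kind argument must match the internal    */
/* object-kind numbering; EXAMPLE_NORMAL_KIND is an assumed value that */
/* would normally come from gc_priv.h (NORMAL) or the inline           */
/* allocation header.                                                  */

#include <stddef.h>

extern char * GC_generic_malloc_words_small();  /* defined above       */

#define EXAMPLE_NORMAL_KIND 1   /* assumption: index of the NORMAL kind */

struct example_pair { void *car; void *cdr; };

static struct example_pair * example_alloc_pair(void)
{
    /* Round the byte size up to a whole number of words.              */
    size_t lw = (sizeof(struct example_pair) + sizeof(char *) - 1)
                  / sizeof(char *);

    return((struct example_pair *)
             GC_generic_malloc_words_small(lw, EXAMPLE_NORMAL_KIND));
}
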
                    166:
                    167: #if defined(THREADS) && !defined(SRC_M3)
                    168: /* Return a list of 1 or more objects of the indicated size, linked    */
                    169: /* through the first word in the object.  This has the advantage that  */
                    170: /* it acquires the allocation lock only once, and may greatly reduce   */
                    171: /* time wasted contending for the allocation lock.  Typical usage would */
                    172: /* be in a thread that requires many items of the same size.  It would */
                    173: /* keep its own free list in thread-local storage, and call            */
                    174: /* GC_malloc_many or friends to replenish it.  (We do not round up     */
                    175: /* object sizes, since a call indicates the intention to consume many  */
                    176: /* objects of exactly this size.)                                      */
                    177: /* Note that the client should usually clear the link field.           */
                    178: ptr_t GC_generic_malloc_many(lb, k)
                    179: register word lb;
                    180: register int k;
                    181: {
                    182: ptr_t op;
                    183: register ptr_t p;
                    184: ptr_t *opp;
                    185: word lw;
                    186: register word my_words_allocd;
                    187: DCL_LOCK_STATE;
                    188:
                    189:     if (!SMALL_OBJ(lb)) {
                    190:         op = GC_generic_malloc(lb, k);
                    191:         if(0 != op) obj_link(op) = 0;
                    192:         return(op);
                    193:     }
                    194:     lw = ALIGNED_WORDS(lb);
                    195:     GC_INVOKE_FINALIZERS();
                    196:     DISABLE_SIGNALS();
                    197:     LOCK();
                    198:     opp = &(GC_obj_kinds[k].ok_freelist[lw]);
                    199:     if( (op = *opp) == 0 ) {
                    200:         if (!GC_is_initialized) {
                    201:             GC_init_inner();
                    202:         }
                    203:        op = GC_clear_stack(GC_allocobj(lw, k));
                    204:        if (op == 0) {
                    205:            UNLOCK();
                    206:            ENABLE_SIGNALS();
                    207:            op = (*GC_oom_fn)(lb);
                    208:            if(0 != op) obj_link(op) = 0;
                    209:             return(op);
                    210:        }
                    211:     }
                    212:     *opp = 0;
                    213:     my_words_allocd = 0;
                    214:     for (p = op; p != 0; p = obj_link(p)) {
                    215:         my_words_allocd += lw;
                    216:         if (my_words_allocd >= BODY_SZ) {
                    217:             *opp = obj_link(p);
                    218:             obj_link(p) = 0;
                    219:             break;
                    220:         }
                    221:     }
                    222:     GC_words_allocd += my_words_allocd;
                    223:
                    224: out:
                    225:     UNLOCK();
                    226:     ENABLE_SIGNALS();
                    227:     return(op);
                    228:
                    229: }
                    230:
                    231: void * GC_malloc_many(size_t lb)
                    232: {
                    233:     return(GC_generic_malloc_many(lb, NORMAL));
                    234: }
                    235:
                    236: /* Note that the "atomic" version of this would be unsafe, since the   */
                    237: /* links would not be seen by the collector.                           */
                    238: # endif
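
/* Illustrative sketch (not part of the original file): the usage      */
/* pattern described in the comment before GC_generic_malloc_many.     */
/* Each thread keeps a private free list of same-sized objects and     */
/* refills it with a single GC_malloc_many call, so the allocation     */
/* lock is taken once per batch.  Returned objects are linked through  */
/* their first word, and the client clears that word before use.  The  */
/* __thread qualifier, the object size and the function names are      */
/* assumptions about the client environment.                           */

#include <stddef.h>

extern void * GC_malloc_many();                 /* defined above (THREADS) */

#define EXAMPLE_OBJ_BYTES 48                    /* hypothetical object size */

static __thread void * example_local_list = 0;  /* assumes gcc-style TLS */

static void * example_thread_alloc(void)
{
    void *op;

    if (example_local_list == 0) {
        /* Replenish: one lock acquisition yields a whole list.        */
        example_local_list = GC_malloc_many((size_t)EXAMPLE_OBJ_BYTES);
        if (example_local_list == 0) return(0);
    }
    op = example_local_list;
    example_local_list = *(void **)op;          /* unlink first object  */
    *(void **)op = 0;                           /* clear the link field */
    return(op);
}
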
                    239:
                    240: /* Allocate lb bytes of pointerful, traced, but not collectable data */
                    241: # ifdef __STDC__
                    242:     GC_PTR GC_malloc_uncollectable(size_t lb)
                    243: # else
                    244:     GC_PTR GC_malloc_uncollectable(lb)
                    245:     size_t lb;
                    246: # endif
                    247: {
                    248: register ptr_t op;
                    249: register ptr_t *opp;
                    250: register word lw;
                    251: DCL_LOCK_STATE;
                    252:
                    253:     if( SMALL_OBJ(lb) ) {
                    254: #       ifdef MERGE_SIZES
                    255: #        ifdef ADD_BYTE_AT_END
                    256:            if (lb != 0) lb--;
                    257:                  /* We don't need the extra byte, since this won't be  */
                    258:                  /* collected anyway.                                  */
                    259: #        endif
                    260:          lw = GC_size_map[lb];
                    261: #      else
                    262:          lw = ALIGNED_WORDS(lb);
                    263: #       endif
                    264:        opp = &(GC_uobjfreelist[lw]);
                    265:        FASTLOCK();
                    266:         if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
                    267:             /* See above comment on signals.   */
                    268:             *opp = obj_link(op);
                    269:             obj_link(op) = 0;
                    270:             GC_words_allocd += lw;
                     271:             /* Mark bit was already set on free list.  It will be      */
                    272:            /* cleared only temporarily during a collection, as a       */
                    273:            /* result of the normal free list mark bit clearing.        */
                    274:             GC_non_gc_bytes += WORDS_TO_BYTES(lw);
                    275:             FASTUNLOCK();
                    276:             return((GC_PTR) op);
                    277:         }
                    278:         FASTUNLOCK();
                    279:         op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
                    280:     } else {
                    281:        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
                    282:     }
                    283:     if (0 == op) return(0);
                    284:     /* We don't need the lock here, since we have an undisguised       */
                    285:     /* pointer.  We do need to hold the lock while we adjust           */
                    286:     /* mark bits.                                                      */
                    287:     {
                    288:        register struct hblk * h;
                    289:
                    290:        h = HBLKPTR(op);
                    291:        lw = HDR(h) -> hb_sz;
                    292:
                    293:        DISABLE_SIGNALS();
                    294:        LOCK();
                    295:        GC_set_mark_bit(op);
                    296:        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
                    297:        UNLOCK();
                    298:        ENABLE_SIGNALS();
                    299:        return((GC_PTR) op);
                    300:     }
                    301: }
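
/* Illustrative sketch (not part of the original file): typical use of */
/* GC_malloc_uncollectable.  The object is traced, so collectable      */
/* objects it points to are kept alive, but it is never reclaimed by   */
/* the collector; it therefore survives even if the only reference to  */
/* it is stored where the collector cannot see it, and it must be      */
/* released explicitly with GC_free.  The context structure and the    */
/* foreign library mentioned below are hypothetical.                   */

#include "gc.h"

struct example_ctx {
    void * user_data;       /* may point to ordinary collectable data  */
};

static struct example_ctx * example_make_ctx(void *user_data)
{
    struct example_ctx *ctx = (struct example_ctx *)
        GC_malloc_uncollectable(sizeof(struct example_ctx));

    if (ctx != 0) ctx -> user_data = user_data;
    /* 'ctx' may now be handed to a foreign library that stores it in  */
    /* memory the collector does not scan.                             */
    return(ctx);
}

static void example_free_ctx(struct example_ctx *ctx)
{
    if (ctx != 0) GC_free(ctx);  /* uncollectable objects need explicit freeing */
}
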
                    302:
                    303: # ifdef ATOMIC_UNCOLLECTABLE
                    304: /* Allocate lb bytes of pointerfree, untraced, uncollectable data      */
                    305: /* This is normally roughly equivalent to the system malloc.           */
                    306: /* But it may be useful if malloc is redefined.                                */
                    307: # ifdef __STDC__
                    308:     GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
                    309: # else
                    310:     GC_PTR GC_malloc_atomic_uncollectable(lb)
                    311:     size_t lb;
                    312: # endif
                    313: {
                    314: register ptr_t op;
                    315: register ptr_t *opp;
                    316: register word lw;
                    317: DCL_LOCK_STATE;
                    318:
                    319:     if( SMALL_OBJ(lb) ) {
                    320: #       ifdef MERGE_SIZES
                    321: #        ifdef ADD_BYTE_AT_END
                    322:            if (lb != 0) lb--;
                    323:                  /* We don't need the extra byte, since this won't be  */
                    324:                  /* collected anyway.                                  */
                    325: #        endif
                    326:          lw = GC_size_map[lb];
                    327: #      else
                    328:          lw = ALIGNED_WORDS(lb);
                    329: #       endif
                    330:        opp = &(GC_auobjfreelist[lw]);
                    331:        FASTLOCK();
                    332:         if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
                    333:             /* See above comment on signals.   */
                    334:             *opp = obj_link(op);
                    335:             obj_link(op) = 0;
                    336:             GC_words_allocd += lw;
                    337:            /* Mark bit was already set while object was on free list. */
                    338:             GC_non_gc_bytes += WORDS_TO_BYTES(lw);
                    339:             FASTUNLOCK();
                    340:             return((GC_PTR) op);
                    341:         }
                    342:         FASTUNLOCK();
                    343:         op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
                    344:     } else {
                    345:        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
                    346:     }
                    347:     if (0 == op) return(0);
                    348:     /* We don't need the lock here, since we have an undisguised       */
                    349:     /* pointer.  We do need to hold the lock while we adjust           */
                    350:     /* mark bits.                                                      */
                    351:     {
                    352:        register struct hblk * h;
                    353:
                    354:        h = HBLKPTR(op);
                    355:        lw = HDR(h) -> hb_sz;
                    356:
                    357:        DISABLE_SIGNALS();
                    358:        LOCK();
                    359:        GC_set_mark_bit(op);
                    360:        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
                    361:        UNLOCK();
                    362:        ENABLE_SIGNALS();
                    363:        return((GC_PTR) op);
                    364:     }
                    365: }
                    366:
                    367: #endif /* ATOMIC_UNCOLLECTABLE */
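
/* Illustrative sketch (not part of the original file): a malloc-style */
/* wrapper over GC_malloc_atomic_uncollectable, which the comment      */
/* above compares to the system malloc.  The memory is neither scanned */
/* for pointers nor ever collected, so it should hold only data that   */
/* contains no pointers to collectable objects, and it is released     */
/* explicitly with GC_free.  The wrapper names are hypothetical, and   */
/* the sketch assumes the public header declares the function in this  */
/* build (the implementation above is conditional on                   */
/* ATOMIC_UNCOLLECTABLE).                                              */

#include <stddef.h>
#include "gc.h"

static void * example_raw_alloc(size_t n)
{
    /* Contents are not examined by the collector, so objects          */
    /* reachable only from this block are not kept alive.              */
    return(GC_malloc_atomic_uncollectable(n));
}

static void example_raw_free(void *p)
{
    if (p != 0) GC_free(p);
}
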
