
OpenXM_contrib2/asir2000/gc/mallocx.c, Revision 1.1.1.1

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

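/*
 * Illustrative sketch (not part of the original file): how an inlined
 * allocator outside the collector might use one of the free-list
 * pointers above.  It assumes the free list is indexed by object size
 * in words, that a free object's link lives in its first word, and
 * that the caller already holds the allocation lock (or the program is
 * single-threaded).  GC_generic_malloc_words_small() below is the slow
 * path, and GC_incr_words_allocd() below keeps the allocation
 * accounting consistent without touching GC_arrays directly.
 */
#if 0
static void * example_inline_alloc(size_t lw)   /* lw = size in words */
{
    void *op = GC_objfreelist_ptr[lw];          /* head of size-lw free list */

    if (op == 0) {
        /* Free list empty: fall back to the out-of-line allocator. */
        return GC_generic_malloc_words_small(lw, NORMAL);
    }
    GC_objfreelist_ptr[lw] = *(void **)op;      /* unlink the head object    */
    *(void **)op = 0;                           /* clear the link word       */
    GC_incr_words_allocd(lw);                   /* account for the fast path */
    return op;
}
#endif
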
/* Allocate a composite object of size n bytes.  The caller guarantees  */
/* that pointers past the first page are not relevant.  Caller holds    */
/* allocation lock.                                                     */
ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register struct hblk * h;
    register word n_blocks;
    register word lw;
    register ptr_t op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner((word)lb, k));
    n_blocks = divHBLKSZ(ADD_SLOP(lb) + HDR_BYTES + HBLKSIZE-1);
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if(GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    lw = ROUNDED_UP_WORDS(lb);
    h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
#   ifdef USE_MUNMAP
      if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
      }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, TRUE)) {
      h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
    }
    if (h == 0) {
        op = 0;
    } else {
        op = (ptr_t) (h -> hb_body);
        GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
    }
    GC_words_allocd += lw;
    return((ptr_t)op);
}

ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = GC_generic_malloc_inner_ignore_off_page(lb,k);
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}

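/*
 * Illustrative client-side sketch (not part of the original file): the
 * _ignore_off_page variants are intended for large objects where the
 * caller promises to keep a pointer to somewhere within the first page
 * of the object, so the collector need not recognize interior pointers
 * further in.  The structure, field name, and buffer size are invented
 * for the example.
 */
#if 0
struct big_buffer {
    char *base;         /* always points at the start of the object */
};

static void example_ignore_off_page(struct big_buffer *b)
{
    /* Several megabytes, far larger than one heap block. */
    b -> base = (char *)GC_malloc_atomic_ignore_off_page(8 * 1024 * 1024);
    /* Only b->base (a pointer into the first page) keeps the object live. */
}
#endif
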
/* Increment GC_words_allocd from code that doesn't have direct access  */
/* to GC_arrays.                                                         */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.                                            */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and         */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.                */
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;
DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

#if defined(THREADS) && !defined(SRC_M3)
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)  A sketch of such a client follows    */
/* this section.                                                        */
/* Note that the client should usually clear the link field.            */
ptr_t GC_generic_malloc_many(lb, k)
register word lb;
register int k;
{
ptr_t op;
register ptr_t p;
ptr_t *opp;
word lw;
register word my_words_allocd;
DCL_LOCK_STATE;

    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        return(op);
    }
    lw = ALIGNED_WORDS(lb);
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    opp = &(GC_obj_kinds[k].ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        op = GC_clear_stack(GC_allocobj(lw, k));
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            op = (*GC_oom_fn)(lb);
            if(0 != op) obj_link(op) = 0;
            return(op);
        }
    }
    *opp = 0;
    my_words_allocd = 0;
    for (p = op; p != 0; p = obj_link(p)) {
        my_words_allocd += lw;
        if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
        }
    }
    GC_words_allocd += my_words_allocd;

out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(op);

}

void * GC_malloc_many(size_t lb)
{
    return(GC_generic_malloc_many(lb, NORMAL));
}

/* Note that the "atomic" version of this would be unsafe, since the    */
/* links would not be seen by the collector.                            */
# endif
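
/*
 * Illustrative client sketch (not part of the original file): a thread
 * keeps a private free list and refills it with GC_malloc_many(), so
 * the allocation lock is taken once per batch rather than once per
 * object.  The node type, the assumption that my_free_list has one copy
 * per thread (thread-local storage), and the helper name are invented
 * for the example; as the comment above notes, the client clears the
 * link word before using each object.
 */
#if 0
struct node { struct node *next; int payload; };

static void *my_free_list = 0;  /* assume one copy per thread (TLS) */

static struct node * example_alloc_node(void)
{
    void *result;

    if (my_free_list == 0) {
        /* Refill: returns one or more objects linked through word 0. */
        my_free_list = GC_malloc_many(sizeof(struct node));
        if (my_free_list == 0) return 0;        /* out of memory */
    }
    result = my_free_list;
    my_free_list = *(void **)result;            /* advance to next object */
    *(void **)result = 0;                       /* clear the link word    */
    return (struct node *)result;
}
#endif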
                    247:
/* Allocate lb bytes of pointerful, traced, but not collectable data.   */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
#         ifdef ADD_BYTE_AT_END
            if (lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
#         endif
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be       */
            /* cleared only temporarily during a collection, as a       */
            /* result of the normal free list mark bit clearing.        */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised        */
    /* pointer.  We do need to hold the lock while we adjust            */
    /* mark bits.                                                       */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
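
/*
 * Illustrative sketch (not part of the original file): an uncollectable
 * object is traced by the collector, so pointers stored in it keep their
 * targets live, but it is never reclaimed automatically; the caller
 * frees it explicitly with GC_free() when done.  The table type and
 * sizes are invented for the example.
 */
#if 0
static void example_uncollectable(void)
{
    /* Acts much like a registered root: scanned, never collected. */
    void **table = (void **)GC_malloc_uncollectable(64 * sizeof(void *));

    table[0] = GC_malloc(128);  /* the collector sees this pointer */
    /* ... use the table ... */
    GC_free(table);             /* must be freed explicitly        */
}
#endif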

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointer-free, untraced, uncollectable data.     */
/* This is normally roughly equivalent to the system malloc.            */
/* But it may be useful if malloc is redefined.                         */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
#         ifdef ADD_BYTE_AT_END
            if (lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
#         endif
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list.  */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised        */
    /* pointer.  We do need to hold the lock while we adjust            */
    /* mark bits.                                                       */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */
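
/*
 * Illustrative sketch (not part of the original file): an atomic
 * uncollectable object is neither traced nor reclaimed by the collector,
 * which makes it roughly a drop-in for the system malloc when malloc
 * itself has been redirected to the GC.  The buffer size is invented.
 */
#if 0
static char * example_io_buffer(void)
{
    /* Pointer-free data: the collector never scans its contents. */
    char *buf = (char *)GC_malloc_atomic_uncollectable(4096);

    return buf;         /* freed later with GC_free(buf) */
}
#endif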
