OpenXM_contrib2 / asir2000 / gc / mallocx.c, Revision 1.2

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

/* Allocate a composite object of size n bytes.  The caller guarantees  */
/* that pointers past the first page are not relevant.  Caller holds    */
/* allocation lock.                                                      */
ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register struct hblk * h;
    register word n_blocks;
    register word lw;
    register ptr_t op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner((word)lb, k));
    n_blocks = divHBLKSZ(ADD_SLOP(lb) + HDR_BYTES + HBLKSIZE-1);
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    lw = ROUNDED_UP_WORDS(lb);
    h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
#   ifdef USE_MUNMAP
      if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
      }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, TRUE)) {
      h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
    }
    if (h == 0) {
        op = 0;
    } else {
        op = (ptr_t) (h -> hb_body);
        GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
    }
    GC_words_allocd += lw;
    return((ptr_t)op);
}

ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = GC_generic_malloc_inner_ignore_off_page(lb, k);
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}

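/* Usage sketch (illustrative, not part of this file's API): the        */
/* _ignore_off_page allocators are meant for large objects where the    */
/* client keeps a pointer to (or near) the start of the object, so the  */
/* collector need not recognize interior pointers past the first page.  */
/* The function and variable names below are made up.                   */
#if 0
static void example_ignore_off_page(void)
{
    /* A large pointer-free buffer; only the base pointer is retained. */
    double *samples = (double *)
        GC_malloc_atomic_ignore_off_page(1000000 * sizeof(double));

    if (samples != 0) {
        samples[0] = 0.0;       /* use the buffer through `samples' */
    }
}
#endif
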
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.                                           */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and        */
/* bypasses the MERGE_SIZES mechanism.  Used by gc_inline.h.  Caller   */
/* holds the allocation lock and has disabled signals; see the locking */
/* wrapper below.                                                      */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}

/* The same, but acquires the allocation lock itself (and disables     */
/* signals) around the call, and invokes finalizers first.             */
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

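/* Sketch of a direct call (illustrative only; `struct example_node' is */
/* hypothetical): the request is expressed in words, e.g. via           */
/* ALIGNED_WORDS, together with an object kind such as NORMAL/PTRFREE.  */
#if 0
struct example_node { struct example_node * next; int value; };

static void example_words_small(void)
{
    struct example_node * n = (struct example_node *)
        GC_generic_malloc_words_small(
            ALIGNED_WORDS(sizeof(struct example_node)), NORMAL);

    if (n != 0) n -> value = 0;
}
#endif
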
#if defined(THREADS) && !defined(SRC_M3)
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                        */
/* Note that the client should usually clear the link field.            */
ptr_t GC_generic_malloc_many(lb, k)
register word lb;
register int k;
{
ptr_t op;
register ptr_t p;
ptr_t *opp;
word lw;
register word my_words_allocd;
DCL_LOCK_STATE;

    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (0 != op) obj_link(op) = 0;
        return(op);
    }
    lw = ALIGNED_WORDS(lb);
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    opp = &(GC_obj_kinds[k].ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        op = GC_clear_stack(GC_allocobj(lw, k));
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            op = (*GC_oom_fn)(lb);
            if (0 != op) obj_link(op) = 0;
            return(op);
        }
    }
    *opp = 0;
    my_words_allocd = 0;
    for (p = op; p != 0; p = obj_link(p)) {
        my_words_allocd += lw;
        if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
        }
    }
    GC_words_allocd += my_words_allocd;

out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(op);
}

void * GC_malloc_many(size_t lb)
{
    return(GC_generic_malloc_many(lb, NORMAL));
}

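/* Usage sketch (illustrative, not part of this file): the pattern      */
/* described above -- a thread keeps a private free list and refills it */
/* from GC_malloc_many; objects are linked through their first word,    */
/* which the client clears before use.  The names below are made up,    */
/* and a real version would keep the list in thread-local storage.      */
#if 0
static void * example_free_list = 0;

static void * example_alloc_16(void)
{
    void * result;

    if (example_free_list == 0) {
        example_free_list = GC_malloc_many(16);
        if (example_free_list == 0) return(0);
    }
    result = example_free_list;
    example_free_list = *(void **)result;   /* follow the link word  */
    *(void **)result = 0;                   /* clear the link field  */
    return(result);
}
#endif
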
/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
# endif

/* Allocate lb bytes of pointerful, traced, but not collectable data.  */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
#         ifdef ADD_BYTE_AT_END
            if (lb != 0) lb--;
                /* We don't need the extra byte, since this won't be   */
                /* collected anyway.                                   */
#         endif
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

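/* Usage sketch (illustrative, not part of this file): an uncollectable */
/* object is traced for pointers but never reclaimed by the collector,  */
/* so it can act as a root that keeps collectable data reachable.  It   */
/* has to be released explicitly, e.g. with GC_free.  The names below   */
/* are made up.                                                          */
#if 0
struct example_table { void * slots[128]; };

static void example_uncollectable(void)
{
    struct example_table * t = (struct example_table *)
        GC_malloc_uncollectable(sizeof(struct example_table));

    if (t != 0) {
        t -> slots[0] = GC_malloc(64);  /* stays reachable via t          */
        GC_free(t);                     /* never collected automatically  */
    }
}
#endif
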
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointer-free, untraced, uncollectable data.    */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
#         ifdef ADD_BYTE_AT_END
            if (lb != 0) lb--;
                /* We don't need the extra byte, since this won't be   */
                /* collected anyway.                                   */
#         endif
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */
