
Diff for /OpenXM_contrib2/asir2000/gc/mallocx.c between version 1.2 and 1.5

version 1.2, 2000/12/01 09:26:11 -> version 1.5, 2002/07/24 08:00:10
Line 2 (v.1.2) / Line 2 (v.1.5)
   * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
   * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
   * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
+  * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
   *
   * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
   * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
Line 21 (v.1.2) / Line 22 (v.1.5)
   */

  #include <stdio.h>
- #include "gc_priv.h"
+ #include "private/gc_priv.h"

  extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
  void GC_extend_size_map();      /* in misc.c. */
Line 30 (v.1.2) / Line 31 (v.1.5)
  GC_bool GC_alloc_reclaim_list(); /* in malloc.c */
  /* Some externally visible but unadvertised variables to allow access to */
  /* free lists from inlined allocators without including gc_priv.h        */
  /* or introducing dependencies on internal data structure layouts.       */
- ptr_t * CONST GC_objfreelist_ptr = GC_objfreelist;
- ptr_t * CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
- ptr_t * CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
+ ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
+ ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
+ ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
  # ifdef ATOMIC_UNCOLLECTABLE
-     ptr_t * CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
+     ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
  # endif
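These pointers exist so that inline allocators can pop small objects from the free lists without pulling in gc_priv.h.  Below is a minimal, single-threaded sketch of that pattern, in the spirit of the library's gc_inl.h; the helper name quick_alloc_words is hypothetical, and the empty-list slow path is simplified to a plain GC_malloc call.

    #include <stddef.h>

    extern void *GC_malloc(size_t);             /* slow-path fallback      */
    extern char **GC_objfreelist_ptr;           /* ptr_t is char*          */

    void *quick_alloc_words(size_t lw)          /* lw = size in words      */
    {
        char *op = GC_objfreelist_ptr[lw];
        if (op == 0)                            /* empty list: go slow     */
            return GC_malloc(lw * sizeof(char *));
        GC_objfreelist_ptr[lw] = *(char **)op;  /* pop the list head       */
        *(char **)op = 0;                       /* clear the link word     */
        return op;
    }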
   
- /* Allocate a composite object of size n bytes.  The caller guarantees  */
- /* that pointers past the first page are not relevant.  Caller holds    */
- /* allocation lock.                                                     */
- ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
- register size_t lb;
- register int k;
- {
-     register struct hblk * h;
-     register word n_blocks;
-     register word lw;
-     register ptr_t op;
-
-     if (lb <= HBLKSIZE)
-         return(GC_generic_malloc_inner((word)lb, k));
-     n_blocks = divHBLKSZ(ADD_SLOP(lb) + HDR_BYTES + HBLKSIZE-1);
-     if (!GC_is_initialized) GC_init_inner();
-     /* Do our share of marking work */
-     if(GC_incremental && !GC_dont_gc)
-         GC_collect_a_little_inner((int)n_blocks);
-     lw = ROUNDED_UP_WORDS(lb);
-     h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
- #   ifdef USE_MUNMAP
-       if (0 == h) {
-         GC_merge_unmapped();
-         h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
-       }
- #   endif
-     while (0 == h && GC_collect_or_expand(n_blocks, TRUE)) {
-       h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
-     }
-     if (h == 0) {
-         op = 0;
-     } else {
-         op = (ptr_t) (h -> hb_body);
-         GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
-     }
-     GC_words_allocd += lw;
-     return((ptr_t)op);
- }
+ GC_PTR GC_generic_or_special_malloc(lb,knd)
+ word lb;
+ int knd;
+ {
+     switch(knd) {
+ #     ifdef STUBBORN_ALLOC
+         case STUBBORN:
+             return(GC_malloc_stubborn((size_t)lb));
+ #     endif
+         case PTRFREE:
+             return(GC_malloc_atomic((size_t)lb));
+         case NORMAL:
+             return(GC_malloc((size_t)lb));
+         case UNCOLLECTABLE:
+             return(GC_malloc_uncollectable((size_t)lb));
+ #       ifdef ATOMIC_UNCOLLECTABLE
+           case AUNCOLLECTABLE:
+             return(GC_malloc_atomic_uncollectable((size_t)lb));
+ #       endif /* ATOMIC_UNCOLLECTABLE */
+         default:
+             return(GC_generic_malloc(lb,knd));
+     }
+ }
+
+ /* Change the size of the block pointed to by p to contain at least   */
+ /* lb bytes.  The object may be (and quite likely will be) moved.     */
+ /* The kind (e.g. atomic) is the same as that of the old.             */
+ /* Shrinking of large blocks is not implemented well.                 */
+ # ifdef __STDC__
+     GC_PTR GC_realloc(GC_PTR p, size_t lb)
+ # else
+     GC_PTR GC_realloc(p,lb)
+     GC_PTR p;
+     size_t lb;
+ # endif
+ {
+ register struct hblk * h;
+ register hdr * hhdr;
+ register word sz;        /* Current size in bytes       */
+ register word orig_sz;   /* Original sz in bytes        */
+ int obj_kind;
+
+     if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
+     h = HBLKPTR(p);
+     hhdr = HDR(h);
+     sz = hhdr -> hb_sz;
+     obj_kind = hhdr -> hb_obj_kind;
+     sz = WORDS_TO_BYTES(sz);
+     orig_sz = sz;
+
+     if (sz > MAXOBJBYTES) {
+         /* Round it up to the next whole heap block */
+           register word descr;
+
+           sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
+           hhdr -> hb_sz = BYTES_TO_WORDS(sz);
+           descr = GC_obj_kinds[obj_kind].ok_descriptor;
+           if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
+           hhdr -> hb_descr = descr;
+           if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
+           /* Extra area is already cleared by GC_alloc_large_and_clear. */
+     }
+     if (ADD_SLOP(lb) <= sz) {
+         if (lb >= (sz >> 1)) {
+ #           ifdef STUBBORN_ALLOC
+                 if (obj_kind == STUBBORN) GC_change_stubborn(p);
+ #           endif
+             if (orig_sz > lb) {
+               /* Clear unneeded part of object to avoid bogus pointer */
+               /* tracing.                                             */
+               /* Safe for stubborn objects.                           */
+                 BZERO(((ptr_t)p) + lb, orig_sz - lb);
+             }
+             return(p);
+         } else {
+             /* shrink */
+               GC_PTR result =
+                         GC_generic_or_special_malloc((word)lb, obj_kind);
+
+               if (result == 0) return(0);
+                   /* Could also return original object.  But this       */
+                   /* gives the client warning of imminent disaster.     */
+               BCOPY(p, result, lb);
+ #             ifndef IGNORE_FREE
+                 GC_free(p);
+ #             endif
+               return(result);
+         }
+     } else {
+         /* grow */
+           GC_PTR result =
+                     GC_generic_or_special_malloc((word)lb, obj_kind);
+
+           if (result == 0) return(0);
+           BCOPY(p, result, sz);
+ #         ifndef IGNORE_FREE
+             GC_free(p);
+ #         endif
+           return(result);
+     }
+ }
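The new GC_realloc preserves ANSI realloc semantics on top of the collector: a null p behaves like GC_malloc, a grown object is copied into fresh storage of the same kind, and the unused tail is cleared so stale data is not traced as pointers.  A minimal client-side sketch, using only the public <gc.h> entry points (grow_buffer is a hypothetical helper):

    #include <gc.h>
    #include <stddef.h>

    /* Hypothetical helper: resize a collected buffer.  GC_realloc     */
    /* may move the object; the old contents are copied for us.        */
    char *grow_buffer(char *buf, size_t new_len)
    {
        char *p = GC_realloc(buf, new_len); /* buf == 0 acts like GC_malloc */
        if (p == 0) return 0;               /* out of memory; buf untouched */
        return p;                           /* same kind (e.g. atomic) as buf */
    }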
   
+ # if defined(REDIRECT_MALLOC) || defined(REDIRECT_REALLOC)
+ # ifdef __STDC__
+     GC_PTR realloc(GC_PTR p, size_t lb)
+ # else
+     GC_PTR realloc(p,lb)
+     GC_PTR p;
+     size_t lb;
+ # endif
+   {
+ #   ifdef REDIRECT_REALLOC
+       return(REDIRECT_REALLOC(p, lb));
+ #   else
+       return(GC_realloc(p, lb));
+ #   endif
+   }
+ # endif /* REDIRECT_MALLOC */
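When the collector is built to replace the C library allocator, the wrapper above becomes the program's realloc.  A sketch of how such a build is typically configured (illustrative flags, not taken from this diff):

    /* Illustrative build line (hypothetical; exact flags vary):          */
    /*                                                                    */
    /*   cc -DREDIRECT_MALLOC=GC_malloc -DREDIRECT_REALLOC=GC_realloc ... */
    /*                                                                    */
    /* With REDIRECT_REALLOC defined, realloc(p, lb) above expands to     */
    /* return(REDIRECT_REALLOC(p, lb)); otherwise it falls back to        */
    /* GC_realloc directly.                                               */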
   
   
+ /* The same thing, except caller does not hold allocation lock. */
+ /* We avoid holding allocation lock while we clear memory.      */
  ptr_t GC_generic_malloc_ignore_off_page(lb, k)
  register size_t lb;
  register int k;
  {
      register ptr_t result;
+     word lw;
+     word n_blocks;
+     GC_bool init;
      DCL_LOCK_STATE;
+
+     if (SMALL_OBJ(lb))
+         return(GC_generic_malloc((word)lb, k));
+     lw = ROUNDED_UP_WORDS(lb);
+     n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+     init = GC_obj_kinds[k].ok_init;
+     if (GC_debugging_started) GC_print_all_smashed();
      GC_INVOKE_FINALIZERS();
      DISABLE_SIGNALS();
      LOCK();
-     result = GC_generic_malloc_inner_ignore_off_page(lb,k);
+     result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
+     if (0 != result) {
+         if (GC_debugging_started) {
+             BZERO(result, n_blocks * HBLKSIZE);
+         } else {
+ #           ifdef THREADS
+               /* Clear any memory that might be used for GC descriptors */
+               /* before we release the lock.                            */
+                 ((word *)result)[0] = 0;
+                 ((word *)result)[1] = 0;
+                 ((word *)result)[lw-1] = 0;
+                 ((word *)result)[lw-2] = 0;
+ #           endif
+         }
+     }
+     GC_words_allocd += lw;
      UNLOCK();
      ENABLE_SIGNALS();
      if (0 == result) {
          return((*GC_oom_fn)(lb));
      } else {
+         if (init & !GC_debugging_started) {
+             BZERO(result, n_blocks * HBLKSIZE);
+         }
          return(result);
      }
  }
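The corresponding public entry point is GC_malloc_ignore_off_page (declared in gc.h): it is meant for large objects where the client promises to keep a pointer to, or near, the beginning of the object, so the collector need not recognize interior pointers far past the first page.  A small usage sketch (big_array is a hypothetical helper):

    #include <gc.h>
    #include <stddef.h>

    /* Hypothetical helper for a large collected array.                */
    double *big_array(size_t n)
    {
        double *a = GC_malloc_ignore_off_page(n * sizeof(double));
        /* Keep 'a' itself (or a pointer near the array start)         */
        /* reachable; a pointer only into the middle of the array      */
        /* would not be guaranteed to keep it alive.                   */
        return a;
    }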
Line 175 (v.1.2) / Line 287 (v.1.5)
  register struct obj_kind * kind = GC_obj_kinds + k;
  register ptr_t op;
  DCL_LOCK_STATE;

+     if (GC_debugging_started) GC_print_all_smashed();
      GC_INVOKE_FINALIZERS();
      DISABLE_SIGNALS();
      LOCK();
Line 185 (v.1.2) / Line 298 (v.1.5)
  DCL_LOCK_STATE;
  }

  #if defined(THREADS) && !defined(SRC_M3)
   
+ extern signed_word GC_mem_found;   /* Protected by GC lock.  */
+
+ #ifdef PARALLEL_MARK
+ volatile signed_word GC_words_allocd_tmp = 0;
+                         /* Number of words of memory allocated since    */
+                         /* we released the GC lock.  Instead of         */
+                         /* reacquiring the GC lock just to add this in, */
+                         /* we add it in the next time we reacquire      */
+                         /* the lock.  (Atomically adding it doesn't     */
+                         /* work, since we would have to atomically      */
+                         /* update it in GC_malloc, which is too         */
+                         /* expensive.)                                  */
+ #endif /* PARALLEL_MARK */
+
+ /* See reclaim.c: */
+ extern ptr_t GC_reclaim_generic();
   
  /* Return a list of 1 or more objects of the indicated size, linked     */
  /* through the first word in the object.  This has the advantage that   */
  /* it acquires the allocation lock only once, and may greatly reduce    */
Line 194 (v.1.2) / Line 325 (v.1.5)
  DCL_LOCK_STATE;
  /* GC_malloc_many or friends to replenish it.  (We do not round up      */
  /* object sizes, since a call indicates the intention to consume many   */
  /* objects of exactly this size.)                                       */
+ /* We return the free-list by assigning it to *result, since it is      */
+ /* not safe to return, e.g. a linked list of pointer-free objects,      */
+ /* since the collector would not retain the entire list if it were      */
+ /* invoked just as we were returning.                                   */
  /* Note that the client should usually clear the link field.            */
- ptr_t GC_generic_malloc_many(lb, k)
+ void GC_generic_malloc_many(lb, k, result)
  register word lb;
  register int k;
+ ptr_t *result;
  {
  ptr_t op;
- register ptr_t p;
+ ptr_t p;
  ptr_t *opp;
  word lw;
- register word my_words_allocd;
+ word my_words_allocd = 0;
+ struct obj_kind * ok = &(GC_obj_kinds[k]);
  DCL_LOCK_STATE;

+ #   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
+ #     define COUNT_ARG , &my_words_allocd
+ #   else
+ #     define COUNT_ARG
+ #     define NEED_TO_COUNT
+ #   endif
      if (!SMALL_OBJ(lb)) {
          op = GC_generic_malloc(lb, k);
          if(0 != op) obj_link(op) = 0;
-         return(op);
+         *result = op;
+         return;
      }
      lw = ALIGNED_WORDS(lb);
+     if (GC_debugging_started) GC_print_all_smashed();
      GC_INVOKE_FINALIZERS();
      DISABLE_SIGNALS();
      LOCK();
-     opp = &(GC_obj_kinds[k].ok_freelist[lw]);
-     if( (op = *opp) == 0 ) {
-         if (!GC_is_initialized) {
-             GC_init_inner();
-         }
-         op = GC_clear_stack(GC_allocobj(lw, k));
-         if (op == 0) {
-             UNLOCK();
-             ENABLE_SIGNALS();
-             op = (*GC_oom_fn)(lb);
-             if(0 != op) obj_link(op) = 0;
-             return(op);
-         }
-     }
-     *opp = 0;
-     my_words_allocd = 0;
-     for (p = op; p != 0; p = obj_link(p)) {
-         my_words_allocd += lw;
-         if (my_words_allocd >= BODY_SZ) {
-             *opp = obj_link(p);
-             obj_link(p) = 0;
-             break;
-         }
-     }
-     GC_words_allocd += my_words_allocd;
-
- out:
-     UNLOCK();
-     ENABLE_SIGNALS();
-     return(op);
- }
+     if (!GC_is_initialized) GC_init_inner();
+     /* Do our share of marking work */
+       if (GC_incremental && !GC_dont_gc) {
+         ENTER_GC();
+         GC_collect_a_little_inner(1);
+         EXIT_GC();
+       }
+     /* First see if we can reclaim a page of objects waiting to be */
+     /* reclaimed.                                                  */
+     {
+         struct hblk ** rlh = ok -> ok_reclaim_list;
+         struct hblk * hbp;
+         hdr * hhdr;
+
+         rlh += lw;
+         while ((hbp = *rlh) != 0) {
+             hhdr = HDR(hbp);
+             *rlh = hhdr -> hb_next;
+             hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
+ #           ifdef PARALLEL_MARK
+                 {
+                   signed_word my_words_allocd_tmp = GC_words_allocd_tmp;
+
+                   GC_ASSERT(my_words_allocd_tmp >= 0);
+                   /* We only decrement it while holding the GC lock.    */
+                   /* Thus we can't accidentally adjust it down in more  */
+                   /* than one thread simultaneously.                    */
+                   if (my_words_allocd_tmp != 0) {
+                     (void)GC_atomic_add(
+                                 (volatile GC_word *)(&GC_words_allocd_tmp),
+                                 (GC_word)(-my_words_allocd_tmp));
+                     GC_words_allocd += my_words_allocd_tmp;
+                   }
+                 }
+                 GC_acquire_mark_lock();
+                 ++ GC_fl_builder_count;
+                 UNLOCK();
+                 ENABLE_SIGNALS();
+                 GC_release_mark_lock();
+ #           endif
+             op = GC_reclaim_generic(hbp, hhdr, lw,
+                                     ok -> ok_init, 0 COUNT_ARG);
+             if (op != 0) {
+ #             ifdef NEED_TO_COUNT
+                 /* We are neither gathering statistics, nor marking in  */
+                 /* parallel.  Thus GC_reclaim_generic doesn't count     */
+                 /* for us.                                              */
+                 for (p = op; p != 0; p = obj_link(p)) {
+                   my_words_allocd += lw;
+                 }
+ #             endif
+ #             if defined(GATHERSTATS)
+                 /* We also reclaimed memory, so we need to adjust       */
+                 /* that count.                                          */
+                 /* This should be atomic, so the results may be         */
+                 /* inaccurate.                                          */
+                 GC_mem_found += my_words_allocd;
+ #             endif
+ #             ifdef PARALLEL_MARK
+                 *result = op;
+                 (void)GC_atomic_add(
+                                 (volatile GC_word *)(&GC_words_allocd_tmp),
+                                 (GC_word)(my_words_allocd));
+                 GC_acquire_mark_lock();
+                 -- GC_fl_builder_count;
+                 if (GC_fl_builder_count == 0) GC_notify_all_builder();
+                 GC_release_mark_lock();
+                 (void) GC_clear_stack(0);
+                 return;
+ #             else
+                 GC_words_allocd += my_words_allocd;
+                 goto out;
+ #             endif
+             }
+ #           ifdef PARALLEL_MARK
+               GC_acquire_mark_lock();
+               -- GC_fl_builder_count;
+               if (GC_fl_builder_count == 0) GC_notify_all_builder();
+               GC_release_mark_lock();
+               DISABLE_SIGNALS();
+               LOCK();
+               /* GC lock is needed for reclaim list access.  We         */
+               /* must decrement fl_builder_count before reacquiring GC  */
+               /* lock.  Hopefully this path is rare.                    */
+ #           endif
+         }
+     }
+     /* Next try to use prefix of global free list if there is one.      */
+     /* We don't refill it, but we need to use it up before allocating   */
+     /* a new block ourselves.                                           */
+       opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+       if ( (op = *opp) != 0 ) {
+         *opp = 0;
+         my_words_allocd = 0;
+         for (p = op; p != 0; p = obj_link(p)) {
+           my_words_allocd += lw;
+           if (my_words_allocd >= BODY_SZ) {
+             *opp = obj_link(p);
+             obj_link(p) = 0;
+             break;
+           }
+         }
+         GC_words_allocd += my_words_allocd;
+         goto out;
+       }
+     /* Next try to allocate a new block worth of objects of this size.  */
+     {
+         struct hblk *h = GC_allochblk(lw, k, 0);
+         if (h != 0) {
+           if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
+           GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
+                                - BYTES_TO_WORDS(HBLKSIZE) % lw;
+ #         ifdef PARALLEL_MARK
+             GC_acquire_mark_lock();
+             ++ GC_fl_builder_count;
+             UNLOCK();
+             ENABLE_SIGNALS();
+             GC_release_mark_lock();
+ #         endif
+
+           op = GC_build_fl(h, lw, ok -> ok_init, 0);
+ #         ifdef PARALLEL_MARK
+             *result = op;
+             GC_acquire_mark_lock();
+             -- GC_fl_builder_count;
+             if (GC_fl_builder_count == 0) GC_notify_all_builder();
+             GC_release_mark_lock();
+             (void) GC_clear_stack(0);
+             return;
+ #         else
+             goto out;
+ #         endif
+         }
+     }
+
+     /* As a last attempt, try allocating a single object.  Note that    */
+     /* this may trigger a collection or expand the heap.                */
+       op = GC_generic_malloc_inner(lb, k);
+       if (0 != op) obj_link(op) = 0;
+
+   out:
+     *result = op;
+     UNLOCK();
+     ENABLE_SIGNALS();
+     (void) GC_clear_stack(0);
+ }
   
- void * GC_malloc_many(size_t lb)
+ GC_PTR GC_malloc_many(size_t lb)
  {
-     return(GC_generic_malloc_many(lb, NORMAL));
+     ptr_t result;
+     GC_generic_malloc_many(lb, NORMAL, &result);
+     return result;
  }
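GC_generic_malloc_many now hands the list back through *result so the collector cannot lose the tail of, say, a pointer-free list while the function is returning.  A minimal consumer sketch, assuming only the public GC_malloc_many from <gc.h> (next_from_batch and NODE_BYTES are hypothetical):

    #include <gc.h>

    #define NODE_BYTES 32               /* illustrative object size   */

    /* Pop one object from a batch, refilling the batch from           */
    /* GC_malloc_many when it runs dry.  Objects are linked through    */
    /* their first word, which we clear before handing them out.       */
    void *next_from_batch(void **batch)
    {
        void *p = *batch;
        if (p == 0) {
            p = GC_malloc_many(NODE_BYTES); /* returns 1 or more objects */
            if (p == 0) return 0;           /* allocation failed         */
        }
        *batch = *(void **)p;               /* unlink the head...        */
        *(void **)p = 0;                    /* ...and clear link field   */
        return p;
    }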
   
  /* Note that the "atomic" version of this would be unsafe, since the    */
Line 272 (v.1.2) / Line 533 (v.1.5)
  DCL_LOCK_STATE;
   
      if( SMALL_OBJ(lb) ) {
  #       ifdef MERGE_SIZES
- #         ifdef ADD_BYTE_AT_END
-             if (lb != 0) lb--;
+           if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
- #         endif
            lw = GC_size_map[lb];
  #       else
            lw = ALIGNED_WORDS(lb);
Line 338 (v.1.2) / Line 597 (v.1.5)
  DCL_LOCK_STATE;
   
      if( SMALL_OBJ(lb) ) {
  #       ifdef MERGE_SIZES
- #         ifdef ADD_BYTE_AT_END
-             if (lb != 0) lb--;
+           if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
- #         endif
            lw = GC_size_map[lb];
  #       else
            lw = ALIGNED_WORDS(lb);

Legend:
  -        : removed from v.1.2
  +        : added in v.1.5
  unmarked : unchanged context
