
Diff for /OpenXM_contrib/gc/Attic/allchblk.c between version 1.1.1.1 and 1.1.1.3

version 1.1.1.1 (1999/11/27 10:58:32) vs. version 1.1.1.3 (2000/12/01 14:48:23)
Line 1
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
Line 12 (v1.1.1.1) / Line 13 (v1.1.1.3)
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
- /* Boehm, August 9, 1995 5:08 pm PDT */

#define DEBUG
#undef DEBUG
#include <stdio.h>
#include "gc_priv.h"

   GC_bool GC_use_entire_heap = 0;
   
- /*
-  * allocate/free routines for heap blocks
-  * Note that everything called from outside the garbage collector
-  * should be prepared to abort at any point as the result of a signal.
-  */
-
- /*
-  * Free heap blocks are kept on a list sorted by address.
-  * The hb_hdr.hbh_sz field of a free heap block contains the length
-  * (in bytes) of the entire block.
-  * Neighbors are coalesced.
-  */
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
   
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black   */
                /* listed block.  Must be >= HBLKSIZE.                  */
   
- struct hblk * GC_hblkfreelist = 0;
-
- struct hblk *GC_savhbp = (struct hblk *)0;  /* heap block preceding next */
-                                          /* block to be examined by   */
-                                          /* GC_allochblk.             */
# define UNIQUE_THRESHOLD 32
          /* Sizes up to this many HBLKs each have their own free list    */
# define HUGE_THRESHOLD 256
           /* Sizes of at least this many heap blocks are mapped to a      */
           /* single free list.                                            */
   # define FL_COMPRESSION 8
           /* In between sizes map this many distinct sizes to a single    */
           /* bin.                                                         */
   
   # define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                    + UNIQUE_THRESHOLD
   
   struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
   
   /* Map a number of blocks to the appropriate large block free list index. */
   int GC_hblk_fl_from_blocks(blocks_needed)
   word blocks_needed;
   {
       if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
       if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
       return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                           + UNIQUE_THRESHOLD;
   
   }
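
/*
 * Illustrative sketch (not part of either revision of allchblk.c): with
 * UNIQUE_THRESHOLD = 32, HUGE_THRESHOLD = 256 and FL_COMPRESSION = 8 as
 * defined above, N_HBLK_FLS = (256-32)/8 + 32 = 60, and the mapping is
 *   1..32 blocks    -> lists 1..32  (one list per exact block count),
 *   33..255 blocks  -> lists 32..59 (several block counts share a bin),
 *   >= 256 blocks   -> list 60      (the single catch-all list).
 * A self-contained program exercising the same arithmetic:
 */
#include <stdio.h>

#define UNIQUE_THRESHOLD 32
#define HUGE_THRESHOLD  256
#define FL_COMPRESSION    8
#define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                    + UNIQUE_THRESHOLD)

/* Same computation as GC_hblk_fl_from_blocks above. */
static int fl_index(unsigned long blocks)
{
    if (blocks <= UNIQUE_THRESHOLD) return (int)blocks;
    if (blocks >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)((blocks - UNIQUE_THRESHOLD)/FL_COMPRESSION + UNIQUE_THRESHOLD);
}

int main(void)
{
    unsigned long sizes[] = {1, 32, 33, 40, 255, 256, 1000};
    int i;
    for (i = 0; i < (int)(sizeof sizes / sizeof sizes[0]); ++i)
        printf("%4lu blocks -> free list %d\n", sizes[i], fl_index(sizes[i]));
    return 0;
}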
   
   # define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
   # define PHDR(hhdr) HDR(hhdr -> hb_prev)
   # define NHDR(hhdr) HDR(hhdr -> hb_next)
   
   # ifdef USE_MUNMAP
   #   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
  # else  /* !USE_MUNMAP */
   #   define IS_MAPPED(hhdr) 1
   # endif /* USE_MUNMAP */
   
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
-     struct hblk * h = GC_hblkfreelist;
    struct hblk * h;
    word total_free = 0;
-     hdr * hhdr = HDR(h);
    hdr * hhdr;
    word sz;
    int i;

-     while (h != 0) {
    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
-         GC_printf2("0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf0("start black listed\n");
Line 63 (v1.1.1.1) / Line 93 (v1.1.1.3)  void GC_print_hblkfreelist()
             GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
-         hhdr = HDR(h);
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
   
   /* Return the free list index on which the block described by the header */
   /* appears, or -1 if it appears nowhere.                                 */
   int free_list_index_of(wanted)
   hdr * wanted;
   {
       struct hblk * h;
       hdr * hhdr;
       int i;
   
       for (i = 0; i <= N_HBLK_FLS; ++i) {
         h = GC_hblkfreelist[i];
         while (h != 0) {
           hhdr = HDR(h);
           if (hhdr == wanted) return i;
           h = hhdr -> hb_next;
         }
       }
       return -1;
   }
   
   void GC_dump_regions()
   {
       unsigned i;
       ptr_t start, end;
       ptr_t p;
       size_t bytes;
       hdr *hhdr;
       for (i = 0; i < GC_n_heap_sects; ++i) {
           start = GC_heap_sects[i].hs_start;
           bytes = GC_heap_sects[i].hs_bytes;
           end = start + bytes;
           /* Merge in contiguous sections.        */
             while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
               ++i;
               end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
             }
           GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
           for (p = start; p < end;) {
               hhdr = HDR(p);
               GC_printf1("\t0x%lx ", (unsigned long)p);
               if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                   GC_printf1("Missing header!!\n", hhdr);
                   p += HBLKSIZE;
                   continue;
               }
               if (HBLK_IS_FREE(hhdr)) {
                   int correct_index = GC_hblk_fl_from_blocks(
                                           divHBLKSZ(hhdr -> hb_sz));
                   int actual_index;
   
                   GC_printf1("\tfree block of size 0x%lx bytes",
                              (unsigned long)(hhdr -> hb_sz));
                   if (IS_MAPPED(hhdr)) {
                       GC_printf0("\n");
                   } else {
                       GC_printf0("(unmapped)\n");
                   }
                   actual_index = free_list_index_of(hhdr);
                   if (-1 == actual_index) {
                       GC_printf1("\t\tBlock not on free list %ld!!\n",
                                   correct_index);
                   } else if (correct_index != actual_index) {
                       GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                                  actual_index, correct_index);
                   }
                   p += hhdr -> hb_sz;
               } else {
                   GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                              (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                   p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
               }
           }
       }
   }
   
# endif /* NO_DEBUGGING */
   
/* Initialize hdr for a block containing the indicated size and         */
Line 100 (v1.1.1.1) / Line 209 (v1.1.1.3)  unsigned char flags;
    return(TRUE);
}
   
- #ifdef EXACT_FIRST
- #   define LAST_TRIP 2
- #else
- #   define LAST_TRIP 1
- #endif
#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
   void GC_remove_from_fl(hhdr, n)
   hdr * hhdr;
   int n;
   {
       GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
       if (hhdr -> hb_prev == 0) {
           int index;
           if (FL_UNKNOWN == n) {
               index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
           } else {
               index = n;
           }
           GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
           GC_hblkfreelist[index] = hhdr -> hb_next;
       } else {
           hdr *phdr;
           GET_HDR(hhdr -> hb_prev, phdr);
           phdr -> hb_next = hhdr -> hb_next;
       }
       if (0 != hhdr -> hb_next) {
           hdr * nhdr;
           GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
           GET_HDR(hhdr -> hb_next, nhdr);
           nhdr -> hb_prev = hhdr -> hb_prev;
       }
   }
   
- word GC_max_hblk_size = HBLKSIZE;
/*
    * Return a pointer to the free block ending just before h, if any.
    */
   struct hblk * GC_free_block_ending_at(h)
   struct hblk *h;
   {
       struct hblk * p = h - 1;
       hdr * phdr;
   
       GET_HDR(p, phdr);
       while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
           p = FORWARDED_ADDR(p,phdr);
           phdr = HDR(p);
       }
       if (0 != phdr) {
           if(HBLK_IS_FREE(phdr)) {
               return p;
           } else {
               return 0;
           }
       }
       p = GC_prev_block(h - 1);
       if (0 != p) {
         phdr = HDR(p);
         if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
           return p;
         }
       }
       return 0;
   }
   
   /*
    * Add hhdr to the appropriate free list.
    * We maintain individual free lists sorted by address.
    */
   void GC_add_to_fl(h, hhdr)
   struct hblk *h;
   hdr * hhdr;
   {
       int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
       struct hblk *second = GC_hblkfreelist[index];
       hdr * second_hdr;
   #   ifdef GC_ASSERTIONS
         struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
         hdr * nexthdr = HDR(next);
         struct hblk *prev = GC_free_block_ending_at(h);
         hdr * prevhdr = HDR(prev);
         GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
         GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
   #   endif
       GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
       GC_hblkfreelist[index] = h;
       hhdr -> hb_next = second;
       hhdr -> hb_prev = 0;
       if (0 != second) {
         GET_HDR(second, second_hdr);
         second_hdr -> hb_prev = h;
       }
       GC_invalidate_map(hhdr);
   }
   
   #ifdef USE_MUNMAP
   
    /* Unmap blocks that haven't been recently touched.  This is the only  */
    /* way blocks are ever unmapped.                                       */
   void GC_unmap_old(void)
   {
       struct hblk * h;
       hdr * hhdr;
       word sz;
       unsigned short last_rec, threshold;
       int i;
   #   define UNMAP_THRESHOLD 6
   
       for (i = 0; i <= N_HBLK_FLS; ++i) {
         for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
           hhdr = HDR(h);
           if (!IS_MAPPED(hhdr)) continue;
           threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
           last_rec = hhdr -> hb_last_reclaimed;
           if (last_rec > GC_gc_no
               || last_rec < threshold && threshold < GC_gc_no
                                          /* not recently wrapped */) {
             sz = hhdr -> hb_sz;
             GC_unmap((ptr_t)h, sz);
             hhdr -> hb_flags |= WAS_UNMAPPED;
           }
         }
       }
   }
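
/*
 * Worked example (an editor's illustration, not part of either revision):
 * with UNMAP_THRESHOLD = 6 a free block is unmapped only after it has sat
 * on a free list for roughly six collections.  The age test above can be
 * read as the following self-contained predicate; the unusual comparisons
 * are apparently there to stay correct when the 16-bit hb_last_reclaimed
 * counter wraps around.
 */
#include <stdio.h>

static int should_unmap(unsigned long gc_no, unsigned short last_rec)
{
    unsigned short threshold = (unsigned short)(gc_no - 6 /* UNMAP_THRESHOLD */);
    return last_rec > gc_no
           || (last_rec < threshold && threshold < gc_no);
}

int main(void)
{
    /* At collection 100 the threshold is 94: a block last reclaimed at  */
    /* collection 90 is old enough to unmap (prints 1), one reclaimed at */
    /* collection 97 is kept (prints 0).                                 */
    printf("%d %d\n", should_unmap(100UL, 90), should_unmap(100UL, 97));
    return 0;
}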
   
   /* Merge all unmapped blocks that are adjacent to other free            */
   /* blocks.  This may involve remapping, since all blocks are either     */
   /* fully mapped or fully unmapped.                                      */
   void GC_merge_unmapped(void)
   {
       struct hblk * h, *next;
       hdr * hhdr, *nexthdr;
       word size, nextsize;
       int i;
   
       for (i = 0; i <= N_HBLK_FLS; ++i) {
         h = GC_hblkfreelist[i];
         while (h != 0) {
           GET_HDR(h, hhdr);
           size = hhdr->hb_sz;
           next = (struct hblk *)((word)h + size);
           GET_HDR(next, nexthdr);
           /* Coalesce with successor, if possible */
             if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
               nextsize = nexthdr -> hb_sz;
               if (IS_MAPPED(hhdr)) {
                 GC_ASSERT(!IS_MAPPED(nexthdr));
                 /* make both consistent, so that we can merge */
                   if (size > nextsize) {
                     GC_remap((ptr_t)next, nextsize);
                   } else {
                     GC_unmap((ptr_t)h, size);
                     hhdr -> hb_flags |= WAS_UNMAPPED;
                   }
               } else if (IS_MAPPED(nexthdr)) {
                 GC_ASSERT(!IS_MAPPED(hhdr));
                 if (size > nextsize) {
                   GC_unmap((ptr_t)next, nextsize);
                 } else {
                   GC_remap((ptr_t)h, size);
                   hhdr -> hb_flags &= ~WAS_UNMAPPED;
                 }
               } else {
                 /* Unmap any gap in the middle */
                   GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
               }
               /* If they are both unmapped, we merge, but leave unmapped. */
               GC_remove_from_fl(hhdr, i);
               GC_remove_from_fl(nexthdr, FL_UNKNOWN);
               hhdr -> hb_sz += nexthdr -> hb_sz;
               GC_remove_header(next);
               GC_add_to_fl(h, hhdr);
               /* Start over at beginning of list */
               h = GC_hblkfreelist[i];
             } else /* not mergable with successor */ {
               h = hhdr -> hb_next;
             }
         } /* while (h != 0) ... */
       } /* for ... */
   }
   
   #endif /* USE_MUNMAP */
   
   /*
    * Return a pointer to a block starting at h of length bytes.
    * Memory for the block is mapped.
    * Remove the block from its free list, and return the remainder (if any)
    * to its appropriate free list.
    * May fail by returning 0.
    * The header for the returned block must be set up by the caller.
    * If the return value is not 0, then hhdr is the header for it.
    */
   struct hblk * GC_get_first_part(h, hhdr, bytes, index)
   struct hblk *h;
   hdr * hhdr;
   word bytes;
   int index;
   {
       word total_size = hhdr -> hb_sz;
       struct hblk * rest;
       hdr * rest_hdr;
   
       GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
       GC_remove_from_fl(hhdr, index);
       if (total_size == bytes) return h;
       rest = (struct hblk *)((word)h + bytes);
       rest_hdr = GC_install_header(rest);
       if (0 == rest_hdr) return(0);
       rest_hdr -> hb_sz = total_size - bytes;
       rest_hdr -> hb_flags = 0;
   #   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
           hhdr -> hb_map = 0;
   #   endif
       GC_add_to_fl(rest, rest_hdr);
       return h;
   }
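
/*
 * Worked example (an editor's illustration, not from either revision): if
 * hhdr describes a free run of 5 blocks (hb_sz == 5*HBLKSIZE) and bytes is
 * 2*HBLKSIZE, the first two blocks are returned to the caller while the
 * remaining three-block tail gets a fresh header and is pushed onto free
 * list GC_hblk_fl_from_blocks(3) == 3 by GC_add_to_fl.
 */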
   
   /*
    * H is a free block.  N points at an address inside it.
    * A new header for n has already been set up.  Fix up h's header
    * to reflect the fact that it is being split, move it to the
    * appropriate free list.
    * N replaces h in the original free list.
    *
   * Nhdr is not completely filled in, since it is about to be allocated.
    * It may in fact end up on the wrong free list for its size.
    * (Hence adding it to a free list is silly.  But this path is hopefully
    * rare enough that it doesn't matter.  The code is cleaner this way.)
    */
   void GC_split_block(h, hhdr, n, nhdr, index)
   struct hblk *h;
   hdr * hhdr;
   struct hblk *n;
   hdr * nhdr;
   int index;      /* Index of free list */
   {
       word total_size = hhdr -> hb_sz;
       word h_size = (word)n - (word)h;
       struct hblk *prev = hhdr -> hb_prev;
       struct hblk *next = hhdr -> hb_next;
   
       /* Replace h with n on its freelist */
         nhdr -> hb_prev = prev;
         nhdr -> hb_next = next;
         nhdr -> hb_sz = total_size - h_size;
         nhdr -> hb_flags = 0;
         if (0 != prev) {
           HDR(prev) -> hb_next = n;
         } else {
           GC_hblkfreelist[index] = n;
         }
         if (0 != next) {
           HDR(next) -> hb_prev = n;
         }
   #     ifdef GC_ASSERTIONS
           nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */
                                   /* free blocks in GC_add_to_fl.         */
   #     endif
   #   ifdef USE_MUNMAP
         hhdr -> hb_last_reclaimed = GC_gc_no;
   #   endif
       hhdr -> hb_sz = h_size;
       GC_add_to_fl(h, hhdr);
       GC_invalidate_map(nhdr);
   }
   
   struct hblk * GC_allochblk_nth();
   
/*
 * Allocate (and return pointer to) a heap block
-  *   for objects of size sz words.
 *   for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
-  *       Caller is resposnsible for building an object freelist in block.
 *       Caller is responsible for building an object freelist in block.
 *
 * We clear the block if it is destined for large objects, and if
 * kind requires that newly allocated objects be cleared.
Line 124 (v1.1.1.1) / Line 493 (v1.1.1.3)  word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
{
-     register struct hblk *thishbp;
-     register hdr * thishdr;             /* Header corr. to thishbp */
    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
    int i;
       for (i = start_list; i <= N_HBLK_FLS; ++i) {
           struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
           if (0 != result) return result;
       }
       return 0;
   }
   /*
    * The same, but with search restricted to nth free list.
    */
   struct hblk *
   GC_allochblk_nth(sz, kind, flags, n)
   word sz;
   int kind;
   unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
   int n;
   {
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
-     struct hblk *prevhbp;
-     register hdr * phdr;                /* Header corr. to prevhbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to hbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */
-     int trip_count = 0;

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
-     if ((word)size_needed >  GC_max_hblk_size)
-         GC_max_hblk_size = size_needed;

    /* search for a big enough block in free list */
-         hbp = GC_savhbp;
-         hhdr = HDR(hbp);
-         for(;;) {
-             prevhbp = hbp;
-             phdr = hhdr;
-             hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
-             hhdr = HDR(hbp);
-
-             if( prevhbp == GC_savhbp) {
-                 if (trip_count == LAST_TRIP) return(0);
-                 ++trip_count;
-             }
-
-             if( hbp == 0 ) continue;
        hbp = GC_hblkfreelist[n];
        for(; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);

            size_avail = hhdr->hb_sz;
- #           ifdef EXACT_FIRST
-                 if (trip_count <= 1 && size_avail != size_needed) continue;
- #           endif
            if (size_avail < size_needed) continue;
- #           ifdef PRESERVE_LAST
-                 if (size_avail != size_needed
-                     && !GC_incremental
-                     && (word)size_needed <= GC_max_hblk_size/2
-                     && GC_in_last_heap_sect((ptr_t)hbp)
-                     && GC_should_collect()) {
-                     continue;
-                 }
- #           endif
            if (!GC_use_entire_heap) {
                if (size_avail != size_needed
                    && USED_HEAP_SIZE >= GC_requested_heapsize
                    && !GC_incremental && GC_should_collect()) {
                    continue;
                }
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
Line 176 (v1.1.1.1) / Line 540 (v1.1.1.3)  unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
              signed_word next_size;

              thishbp = hhdr -> hb_next;
-               if (thishbp == 0) thishbp = GC_hblkfreelist;
-               thishdr = HDR(thishbp);
-               next_size = (signed_word)(thishdr -> hb_sz);
-               if (next_size < size_avail
-                   && next_size >= size_needed
-                   && !GC_is_black_listed(thishbp, (word)size_needed)) {
-                   continue;
-               }
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
Line 203 (v1.1.1.1) / Line 568 (v1.1.1.3)  unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
-                 if (thishbp != hbp && GC_install_header(thishbp)) {
                if (thishbp != hbp &&
                       0 != (thishdr = GC_install_header(thishbp))) {
                     /* Make sure it's mapped before we mangle it. */
   #                   ifdef USE_MUNMAP
                         if (!IS_MAPPED(hhdr)) {
                           GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                           hhdr -> hb_flags &= ~WAS_UNMAPPED;
                         }
   #                   endif
                  /* Split the block at thishbp */
-                       thishdr = HDR(thishbp);
-                       /* GC_invalidate_map not needed, since we will    */
-                       /* allocate this block.                           */
-                       thishdr -> hb_next = hhdr -> hb_next;
-                       thishdr -> hb_sz = size_avail;
-                       hhdr -> hb_sz = (ptr_t)thishbp - (ptr_t)hbp;
-                       hhdr -> hb_next = thishbp;
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
-                       prevhbp = hbp;
-                       phdr = hhdr;
                      hbp = thishbp;
                      hhdr = thishdr;
                         /* We must now allocate thishbp, since it may     */
                         /* be on the wrong free list.                     */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
Line 224 (v1.1.1.1) / Line 591 (v1.1.1.3)  unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
                /* Punt, since anything else risks unreasonable heap growth. */
                WARN("Needed to allocate blacklisted block at 0x%lx\n",
                     (word)hbp);
-                 thishbp = hbp;
                size_avail = orig_avail;
-               } else if (size_avail == 0
-                          && size_needed == HBLKSIZE
-                          && prevhbp != 0) {
- #               ifndef FIND_LEAK
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
Line 241 (v1.1.1.1) / Line 606 (v1.1.1.3)  unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
                    /* Allocate and drop the block in small chunks, to  */
                    /* maximize the chance that we will recover some    */
                    /* later.                                           */
-                       struct hblk * limit = hbp + (hhdr->hb_sz/HBLKSIZE);
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

-                       GC_words_wasted += hhdr->hb_sz;
-                       phdr -> hb_next = hhdr -> hb_next;
                      GC_words_wasted += total_size;
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
-                         if (h == hbp || GC_install_header(h)) {
-                           hhdr = HDR(h);
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(
                                  hhdr,
                                  BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
                                  PTRFREE, 0); /* Cant fail */
                          if (GC_debugging_started) {
-                             BZERO(hbp + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                            BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
-                       if (GC_savhbp == hbp) GC_savhbp = prevhbp;
-                       hbp = prevhbp;
-                       hhdr = phdr;
-                       if (hbp == GC_savhbp) --trip_count;
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
- #               endif
                }
              }
            }
            if( size_avail >= size_needed ) {
-                 /* found a big enough block       */
-                 /* let thishbp --> the block      */
-                 /* set prevhbp, hbp to bracket it */
-                     thishbp = hbp;
-                     thishdr = hhdr;
-                     if( size_avail == size_needed ) {
-                         hbp = hhdr->hb_next;
-                         hhdr = HDR(hbp);
-                     } else {
-                         hbp = (struct hblk *)
-                             (((word)thishbp) + size_needed);
-                         if (!GC_install_header(hbp)) {
-                             hbp = thishbp;
-                             continue;
-                         }
-                         hhdr = HDR(hbp);
-                         GC_invalidate_map(hhdr);
-                         hhdr->hb_next = thishdr->hb_next;
-                         hhdr->hb_sz = size_avail - size_needed;
-                     }
-                 /* remove *thishbp from hblk freelist */
-                     if( prevhbp == 0 ) {
-                         GC_hblkfreelist = hbp;
-                     } else {
-                         phdr->hb_next = hbp;
-                     }
-                 /* save current list search position */
-                     GC_savhbp = hbp;
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n    */
                /* is important.                                        */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }
   
       if (0 == hbp) return 0;
   
    /* Notify virtual dirty bit implementation that we are about to write. */
-         GC_write_hint(thishbp);
-         /* This should deal better with large blocks.   */
        GC_write_hint(hbp);

    /* Add it to map of valid blocks */
-         if (!GC_install_counts(thishbp, (word)size_needed)) return(0);
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
-         if (!setup_header(thishdr, sz, kind, flags)) {
-             GC_remove_counts(thishbp, (word)size_needed);
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Clear block if necessary */
        if (GC_debugging_started
            || sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
-             BZERO(thishbp + HDR_BYTES,  size_needed - HDR_BYTES);
            BZERO(hbp + HDR_BYTES,  size_needed - HDR_BYTES);
        }
   
    /* We just successfully allocated a block.  Restart count of        */
Line 327 (v1.1.1.1) / Line 677 (v1.1.1.3)  unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */

        GC_fail_count = 0;
    }
   
       GC_large_free_bytes -= size_needed;
   
-     return( thishbp );
    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
   
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */
Line 341 (v1.1.1.1) / Line 694 (v1.1.1.3)  struct hblk * GC_freehblk_ptr = 0;  /* Search position
 * All mark words are assumed to be cleared.
 */
void
- GC_freehblk(p)
- register struct hblk *p;
GC_freehblk(hbp)
struct hblk *hbp;
{
- register hdr *phdr;     /* Header corresponding to p */
- register struct hblk *hbp, *prevhbp;
- register hdr *hhdr, *prevhdr;
- register signed_word size;
-
-     /* GC_savhbp may become invalid due to coalescing.  Clear it. */
-         GC_savhbp = (struct hblk *)0;
-
-     phdr = HDR(p);
-     size = phdr->hb_sz;
-     size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
-     GC_remove_counts(p, (word)size);
-     phdr->hb_sz = size;
-     GC_invalidate_map(phdr);
-     prevhbp = 0;
-
-     /* The following optimization was suggested by David Detlefs.       */
-     /* Note that the header cannot be NIL, since there cannot be an     */
-     /* intervening  call to GC_freehblk without resetting               */
-     /* GC_freehblk_ptr.                                                 */
-     if (GC_freehblk_ptr != 0 &&
-         HDR(GC_freehblk_ptr)->hb_map == GC_invalid_map &&
-         (ptr_t)GC_freehblk_ptr < (ptr_t)p) {
-       hbp = GC_freehblk_ptr;
-     } else {
-       hbp = GC_hblkfreelist;
-     };
-     hhdr = HDR(hbp);
-
-     while( (hbp != 0) && (hbp < p) ) {
-         prevhbp = hbp;
-         prevhdr = hhdr;
-         hbp = hhdr->hb_next;
-         hhdr = HDR(hbp);
-     }
-     GC_freehblk_ptr = prevhbp;
struct hblk *next, *prev;
hdr *hhdr, *prevhdr, *nexthdr;
signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
-       if (hbp != 0 && (ptr_t)p + size > (ptr_t)hbp
-         || prevhbp != 0 && (ptr_t)prevhbp + prevhdr->hb_sz > (ptr_t)p) {
-         GC_printf1("Duplicate large block deallocation of 0x%lx\n",
-                    (unsigned long) p);
-         GC_printf2("Surrounding free blocks are 0x%lx and 0x%lx\n",
-                    (unsigned long) prevhbp, (unsigned long) hbp);
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
      }
   
       GC_ASSERT(IS_MAPPED(hhdr));
       GC_invalidate_map(hhdr);
       next = (struct hblk *)((word)hbp + size);
       GET_HDR(next, nexthdr);
       prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
-       if( (((word)p)+size) == ((word)hbp) ) {
-         phdr->hb_next = hhdr->hb_next;
-         phdr->hb_sz += hhdr->hb_sz;
-         GC_remove_header(hbp);
-       } else {
-         phdr->hb_next = hbp;
-       }
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
       /* Coalesce with predecessor, if possible. */
         if (0 != prev) {
           prevhdr = HDR(prev);
           if (IS_MAPPED(prevhdr)) {
             GC_remove_from_fl(prevhdr, FL_UNKNOWN);
             prevhdr -> hb_sz += hhdr -> hb_sz;
             GC_remove_header(hbp);
             hbp = prev;
             hhdr = prevhdr;
           }
         }
   
       GC_large_free_bytes += size;
-     if( prevhbp == 0 ) {
-         GC_hblkfreelist = p;
-     } else if( (((word)prevhbp) + prevhdr->hb_sz)
-                == ((word)p) ) {
-       /* Coalesce with predecessor */
-         prevhdr->hb_next = phdr->hb_next;
-         prevhdr->hb_sz += phdr->hb_sz;
-         GC_remove_header(p);
-     } else {
-         prevhdr->hb_next = p;
-     }
    GC_add_to_fl(hbp, hhdr);
}
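
/*
 * Worked example (an editor's illustration, not from either revision):
 * freeing a 2-block region whose successor is a free, mapped 3-block
 * region and whose predecessor is a free, mapped 1-block region leaves a
 * single 6-block free block.  The successor is absorbed first, then the
 * predecessor absorbs the result, GC_large_free_bytes grows by only the
 * 2 newly freed blocks, and the merged block ends up on free list
 * GC_hblk_fl_from_blocks(6) == 6.
 */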
   

Legend:
  Lines prefixed with "-" appear only in v.1.1.1.1 (removed or replaced text).
  Unprefixed lines show the v.1.1.1.3 text; lines common to both versions are shown once.
  "Line X (v1.1.1.1) / Line Y (v1.1.1.3)" markers give the position of each diff hunk.
