
Diff for /OpenXM_contrib2/asir2000/gc/allchblk.c between version 1.1.1.1 and 1.6

version 1.1.1.1, 1999/12/03 07:39:09 vs. version 1.6, 2002/07/24 08:00:06
Line 2 
Line 2 
  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers   * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.   * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
  * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.   * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
    * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
  *   *
  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED   * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.   * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
Line 13 
Line 14 
  * modified is included with the above copyright notice.   * modified is included with the above copyright notice.
  */   */
   
 #define DEBUG  /* #define DEBUG */
 #undef DEBUG  
 #include <stdio.h>  #include <stdio.h>
 #include "gc_priv.h"  #include "private/gc_priv.h"
   
   GC_bool GC_use_entire_heap = 0;
   
 /*  /*
  * Free heap blocks are kept on one of several free lists,   * Free heap blocks are kept on one of several free lists,
Line 45 
Line 46 
   
 struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };  struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
   
   #ifndef USE_MUNMAP
   
     word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
           /* Number of free bytes on each list.   */
   
     /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS       */
     /* > GC_max_large_allocd_bytes?                                       */
   # ifdef __GNUC__
     __inline__
   # endif
     static GC_bool GC_enough_large_bytes_left(bytes,n)
     word bytes;
     int n;
     {
       int i;
       for (i = N_HBLK_FLS; i >= n; --i) {
           bytes += GC_free_bytes[i];
           if (bytes > GC_max_large_allocd_bytes) return TRUE;
       }
       return FALSE;
     }
   
   # define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);
   
   # define FREE_ASSERT(e) GC_ASSERT(e)
   
   #else /* USE_MUNMAP */
   
   # define INCR_FREE_BYTES(n, b)
   # define FREE_ASSERT(e)
   
   #endif /* USE_MUNMAP */
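
A minimal, self-contained sketch of how the new free-byte accounting is intended to work (the typedefs, the small N_HBLK_FLS value, the threshold and main() below are stand-ins for illustration, not declarations from gc_priv.h): every insertion into or removal from free list n adjusts GC_free_bytes[n], and GC_enough_large_bytes_left sums the tail of that array to decide whether splitting a large block is still safe.

    #include <stdio.h>

    #define N_HBLK_FLS 8                    /* small value, illustration only */
    typedef unsigned long word;
    typedef int GC_bool;
    #define TRUE 1
    #define FALSE 0

    static word free_bytes[N_HBLK_FLS+1];           /* free bytes per list   */
    static word max_large_allocd_bytes = 1UL << 16; /* stand-in threshold    */

    #define INCR_FREE_BYTES(n, b) free_bytes[n] += (b);

    /* Mirrors GC_enough_large_bytes_left: TRUE iff bytes plus everything on */
    /* lists n .. N_HBLK_FLS exceeds the threshold tracked by the collector. */
    static GC_bool enough_large_bytes_left(word bytes, int n)
    {
        int i;
        for (i = N_HBLK_FLS; i >= n; --i) {
            bytes += free_bytes[i];
            if (bytes > max_large_allocd_bytes) return TRUE;
        }
        return FALSE;
    }

    int main(void)
    {
        INCR_FREE_BYTES(3, 32 * 1024);   /* a 32K block lands on list 3 */
        INCR_FREE_BYTES(7, 48 * 1024);   /* a 48K block lands on list 7 */
        printf("split from list 2 ok? %d\n", enough_large_bytes_left(0, 2));
        printf("split from list 5 ok? %d\n", enough_large_bytes_left(0, 5));
        return 0;
    }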
   
 /* Map a number of blocks to the appropriate large block free list index. */  /* Map a number of blocks to the appropriate large block free list index. */
 int GC_hblk_fl_from_blocks(blocks_needed)  int GC_hblk_fl_from_blocks(blocks_needed)
 word blocks_needed;  word blocks_needed;
Line 56  word blocks_needed;
Line 90  word blocks_needed;
   
 }  }
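
The body of GC_hblk_fl_from_blocks is unchanged between the two versions and is therefore elided from this diff. As a hedged illustration of the kind of bucketed mapping involved (the constants and the function below are assumptions for this sketch, not code from this file): small block counts each get a list of their own, medium counts are compressed into shared buckets, and anything past a huge threshold goes on the last list.

    #include <stdio.h>

    /* Illustrative stand-in constants; the real values live in the sources.   */
    #define N_HBLK_FLS        60
    #define UNIQUE_THRESHOLD  32   /* one list per size up to this many blocks */
    #define HUGE_THRESHOLD    256  /* everything this large shares the last list */
    #define FL_COMPRESSION    8    /* in between, 8 sizes share each list      */

    typedef unsigned long word;

    static int hblk_fl_from_blocks(word blocks_needed)
    {
        if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
        if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
        return (int)((blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                     + UNIQUE_THRESHOLD);
    }

    int main(void)
    {
        printf("%d %d %d\n", hblk_fl_from_blocks(1),
               hblk_fl_from_blocks(100), hblk_fl_from_blocks(1000));
        return 0;
    }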
   
 # define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)  
 # define PHDR(hhdr) HDR(hhdr -> hb_prev)  # define PHDR(hhdr) HDR(hhdr -> hb_prev)
 # define NHDR(hhdr) HDR(hhdr -> hb_next)  # define NHDR(hhdr) HDR(hhdr -> hb_next)
   
Line 77  void GC_print_hblkfreelist()
Line 110  void GC_print_hblkfreelist()
   
     for (i = 0; i <= N_HBLK_FLS; ++i) {      for (i = 0; i <= N_HBLK_FLS; ++i) {
       h = GC_hblkfreelist[i];        h = GC_hblkfreelist[i];
       if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);  #     ifdef USE_MUNMAP
          if (0 != h) GC_printf1("Free list %ld:\n",
                                 (unsigned long)i);
   #     else
           if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                                  (unsigned long)i,
                                  (unsigned long)GC_free_bytes[i]);
   #     endif
       while (h != 0) {        while (h != 0) {
         hhdr = HDR(h);          hhdr = HDR(h);
         sz = hhdr -> hb_sz;          sz = hhdr -> hb_sz;
Line 122  hdr * wanted;
Line 162  hdr * wanted;
   
 void GC_dump_regions()  void GC_dump_regions()
 {  {
     int i;      unsigned i;
     ptr_t start, end;      ptr_t start, end;
     ptr_t p;      ptr_t p;
     size_t bytes;      size_t bytes;
Line 217  void GC_remove_from_fl(hhdr, n)
Line 257  void GC_remove_from_fl(hhdr, n)
 hdr * hhdr;  hdr * hhdr;
 int n;  int n;
 {  {
       int index;
   
     GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);      GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
   #   ifndef USE_MUNMAP
          /* We always need index to maintain free counts.  */
         if (FL_UNKNOWN == n) {
             index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
         } else {
             index = n;
         }
   #   endif
     if (hhdr -> hb_prev == 0) {      if (hhdr -> hb_prev == 0) {
         int index;  #       ifdef USE_MUNMAP
         if (FL_UNKNOWN == n) {            if (FL_UNKNOWN == n) {
             index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));              index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
         } else {            } else {
             index = n;              index = n;
         }            }
   #       endif
         GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);          GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
         GC_hblkfreelist[index] = hhdr -> hb_next;          GC_hblkfreelist[index] = hhdr -> hb_next;
     } else {      } else {
         PHDR(hhdr) -> hb_next = hhdr -> hb_next;          hdr *phdr;
           GET_HDR(hhdr -> hb_prev, phdr);
           phdr -> hb_next = hhdr -> hb_next;
     }      }
       INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
       FREE_ASSERT(GC_free_bytes[index] >= 0);
     if (0 != hhdr -> hb_next) {      if (0 != hhdr -> hb_next) {
           hdr * nhdr;
         GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));          GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
         NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;          GET_HDR(hhdr -> hb_next, nhdr);
           nhdr -> hb_prev = hhdr -> hb_prev;
     }      }
 }  }
   
Line 243  struct hblk * GC_free_block_ending_at(h)
Line 300  struct hblk * GC_free_block_ending_at(h)
 struct hblk *h;  struct hblk *h;
 {  {
     struct hblk * p = h - 1;      struct hblk * p = h - 1;
     hdr * phdr = HDR(p);      hdr * phdr;
   
       GET_HDR(p, phdr);
     while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {      while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
         p = FORWARDED_ADDR(p,phdr);          p = FORWARDED_ADDR(p,phdr);
         phdr = HDR(p);          phdr = HDR(p);
     }      }
     if (0 != phdr && HBLK_IS_FREE(phdr)) return p;      if (0 != phdr) {
           if(HBLK_IS_FREE(phdr)) {
               return p;
           } else {
               return 0;
           }
       }
     p = GC_prev_block(h - 1);      p = GC_prev_block(h - 1);
     if (0 != p) {      if (0 != p) {
       phdr = HDR(p);        phdr = HDR(p);
Line 270  hdr * hhdr;
Line 334  hdr * hhdr;
 {  {
     int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));      int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
     struct hblk *second = GC_hblkfreelist[index];      struct hblk *second = GC_hblkfreelist[index];
       hdr * second_hdr;
 #   ifdef GC_ASSERTIONS  #   ifdef GC_ASSERTIONS
       struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);        struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
       hdr * nexthdr = HDR(next);        hdr * nexthdr = HDR(next);
Line 280  hdr * hhdr;
Line 345  hdr * hhdr;
 #   endif  #   endif
     GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);      GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
     GC_hblkfreelist[index] = h;      GC_hblkfreelist[index] = h;
       INCR_FREE_BYTES(index, hhdr -> hb_sz);
       FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
     hhdr -> hb_next = second;      hhdr -> hb_next = second;
     hhdr -> hb_prev = 0;      hhdr -> hb_prev = 0;
     if (0 != second) HDR(second) -> hb_prev = h;      if (0 != second) {
         GET_HDR(second, second_hdr);
         second_hdr -> hb_prev = h;
       }
     GC_invalidate_map(hhdr);      GC_invalidate_map(hhdr);
 }  }
   
Line 329  void GC_merge_unmapped(void)
Line 399  void GC_merge_unmapped(void)
     for (i = 0; i <= N_HBLK_FLS; ++i) {      for (i = 0; i <= N_HBLK_FLS; ++i) {
       h = GC_hblkfreelist[i];        h = GC_hblkfreelist[i];
       while (h != 0) {        while (h != 0) {
         hhdr = HDR(h);          GET_HDR(h, hhdr);
         size = hhdr->hb_sz;          size = hhdr->hb_sz;
         next = (struct hblk *)((word)h + size);          next = (struct hblk *)((word)h + size);
         nexthdr = HDR(next);          GET_HDR(next, nexthdr);
         /* Coalesce with successor, if possible */          /* Coalesce with successor, if possible */
           if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {            if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
             nextsize = nexthdr -> hb_sz;              nextsize = nexthdr -> hb_sz;
Line 397  int index;
Line 467  int index;
     GC_remove_from_fl(hhdr, index);      GC_remove_from_fl(hhdr, index);
     if (total_size == bytes) return h;      if (total_size == bytes) return h;
     rest = (struct hblk *)((word)h + bytes);      rest = (struct hblk *)((word)h + bytes);
     if (!GC_install_header(rest)) return(0);      rest_hdr = GC_install_header(rest);
     rest_hdr = HDR(rest);      if (0 == rest_hdr) return(0);
     rest_hdr -> hb_sz = total_size - bytes;      rest_hdr -> hb_sz = total_size - bytes;
     rest_hdr -> hb_flags = 0;      rest_hdr -> hb_flags = 0;
 #   ifdef GC_ASSERTIONS  #   ifdef GC_ASSERTIONS
       // Mark h not free, to avoid assertion about adjacent free blocks.        /* Mark h not free, to avoid assertion about adjacent free blocks. */
         hhdr -> hb_map = 0;          hhdr -> hb_map = 0;
 #   endif  #   endif
     GC_add_to_fl(rest, rest_hdr);      GC_add_to_fl(rest, rest_hdr);
Line 446  int index; /* Index of free list */
Line 516  int index; /* Index of free list */
       if (0 != next) {        if (0 != next) {
         HDR(next) -> hb_prev = n;          HDR(next) -> hb_prev = n;
       }        }
         INCR_FREE_BYTES(index, -(signed_word)h_size);
         FREE_ASSERT(GC_free_bytes[index] > 0);
 #     ifdef GC_ASSERTIONS  #     ifdef GC_ASSERTIONS
         nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */          nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */
                                 /* free blocks in GC_add_to_fl.         */                                  /* free blocks in GC_add_to_fl.         */
Line 467  struct hblk * GC_allochblk_nth();
Line 539  struct hblk * GC_allochblk_nth();
  * NOTE: We set obj_map field in header correctly.   * NOTE: We set obj_map field in header correctly.
  *       Caller is responsible for building an object freelist in block.   *       Caller is responsible for building an object freelist in block.
  *   *
  * We clear the block if it is destined for large objects, and if   * Unlike older versions of the collectors, the client is responsible
  * kind requires that newly allocated objects be cleared.   * for clearing the block, if necessary.
  */   */
 struct hblk *  struct hblk *
 GC_allochblk(sz, kind, flags)  GC_allochblk(sz, kind, flags)
 word sz;  word sz;
 int kind;  int kind;
 unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */  unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
 {  {
     int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));      word blocks = OBJ_SZ_TO_BLOCKS(sz);
       int start_list = GC_hblk_fl_from_blocks(blocks);
     int i;      int i;
     for (i = start_list; i <= N_HBLK_FLS; ++i) {      for (i = start_list; i <= N_HBLK_FLS; ++i) {
         struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);          struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
         if (0 != result) return result;          if (0 != result) {
               return result;
           }
     }      }
     return 0;      return 0;
 }  }
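
Note the revised header comment above: clearing a freshly allocated block is now the caller's job. A minimal model of that contract, assuming stand-in names (allocate_block and struct obj_kind here are illustrative, though ok_init mirrors the GC_obj_kinds[kind].ok_init flag consulted elsewhere in this file): the allocator returns possibly dirty memory, and the caller zeroes it only when the object kind requires initialized storage.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define HBLKSIZE 4096                  /* stand-in block size                */

    struct obj_kind { int ok_init; };      /* nonzero: objects must start zeroed */

    /* Stand-in for the internal block allocator: returns uncleared memory.     */
    static void *allocate_block(size_t bytes)
    {
        return malloc(bytes);
    }

    /* Caller-side pattern after this change: clear only when the kind needs it. */
    static void *alloc_for_kind(size_t bytes, const struct obj_kind *kind)
    {
        void *h = allocate_block(bytes);
        if (h != NULL && kind->ok_init) {
            memset(h, 0, bytes);           /* the clearing GC_allochblk no longer does */
        }
        return h;
    }

    int main(void)
    {
        struct obj_kind normal = { 1 }, ptrfree = { 0 };
        void *a = alloc_for_kind(HBLKSIZE, &normal);   /* cleared by the caller */
        void *b = alloc_for_kind(HBLKSIZE, &ptrfree);  /* left uncleared        */
        printf("%p %p\n", a, b);
        free(a); free(b);
        return 0;
    }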
Line 505  int n;
Line 580  int n;
   
     /* search for a big enough block in free list */      /* search for a big enough block in free list */
         hbp = GC_hblkfreelist[n];          hbp = GC_hblkfreelist[n];
         hhdr = HDR(hbp);          for(; 0 != hbp; hbp = hhdr -> hb_next) {
         for(; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {              GET_HDR(hbp, hhdr);
             size_avail = hhdr->hb_sz;              size_avail = hhdr->hb_sz;
             if (size_avail < size_needed) continue;              if (size_avail < size_needed) continue;
 #           ifdef PRESERVE_LAST              if (!GC_use_entire_heap
                 if (size_avail != size_needed                  && size_avail != size_needed
                     && !GC_incremental && GC_should_collect()) {                  && USED_HEAP_SIZE >= GC_requested_heapsize
                   && !TRUE_INCREMENTAL && GC_should_collect()) {
   #               ifdef USE_MUNMAP
                     continue;                      continue;
                 }  #               else
 #           endif                      /* If we have enough large blocks left to cover any */
                       /* previous request for large blocks, we go ahead   */
                       /* and split.  Assuming a steady state, that should */
                       /* be safe.  It means that we can use the full      */
                       /* heap if we allocate only small objects.          */
                       if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                         continue;
                       }
                       /* If we are deallocating lots of memory from       */
                       /* finalizers, fail and collect sooner rather       */
                       /* than later.                                      */
                       if (GC_finalizer_mem_freed > (GC_heapsize >> 4))  {
                         continue;
                       }
   #               endif /* !USE_MUNMAP */
               }
             /* If the next heap block is obviously better, go on.       */              /* If the next heap block is obviously better, go on.       */
             /* This prevents us from disassembling a single large block */              /* This prevents us from disassembling a single large block */
             /* to get tiny blocks.                                      */              /* to get tiny blocks.                                      */
Line 523  int n;
Line 615  int n;
   
               thishbp = hhdr -> hb_next;                thishbp = hhdr -> hb_next;
               if (thishbp != 0) {                if (thishbp != 0) {
                 thishdr = HDR(thishbp);                  GET_HDR(thishbp, thishdr);
                 next_size = (signed_word)(thishdr -> hb_sz);                  next_size = (signed_word)(thishdr -> hb_sz);
                 if (next_size < size_avail                  if (next_size < size_avail
                   && next_size >= size_needed                    && next_size >= size_needed
Line 544  int n;
Line 636  int n;
   
               while ((ptr_t)lasthbp <= search_end                while ((ptr_t)lasthbp <= search_end
                      && (thishbp = GC_is_black_listed(lasthbp,                       && (thishbp = GC_is_black_listed(lasthbp,
                                                       (word)eff_size_needed))) {                                                        (word)eff_size_needed))
                           != 0) {
                 lasthbp = thishbp;                  lasthbp = thishbp;
               }                }
               size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;                size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
               thishbp = lasthbp;                thishbp = lasthbp;
               if (size_avail >= size_needed) {                if (size_avail >= size_needed) {
                 if (thishbp != hbp && GC_install_header(thishbp)) {                  if (thishbp != hbp &&
                       0 != (thishdr = GC_install_header(thishbp))) {
                   /* Make sure it's mapped before we mangle it. */                    /* Make sure it's mapped before we mangle it. */
 #                   ifdef USE_MUNMAP  #                   ifdef USE_MUNMAP
                       if (!IS_MAPPED(hhdr)) {                        if (!IS_MAPPED(hhdr)) {
                         GC_remap((ptr_t)hbp, size_avail);                          GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                         hhdr -> hb_flags &= ~WAS_UNMAPPED;                          hhdr -> hb_flags &= ~WAS_UNMAPPED;
                       }                        }
 #                   endif  #                   endif
                   /* Split the block at thishbp */                    /* Split the block at thishbp */
                       thishdr = HDR(thishbp);  
                       GC_split_block(hbp, hhdr, thishbp, thishdr, n);                        GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                   /* Advance to thishbp */                    /* Advance to thishbp */
                       hbp = thishbp;                        hbp = thishbp;
Line 571  int n;
Line 664  int n;
                          && orig_avail - size_needed                           && orig_avail - size_needed
                             > (signed_word)BL_LIMIT) {                              > (signed_word)BL_LIMIT) {
                 /* Punt, since anything else risks unreasonable heap growth. */                  /* Punt, since anything else risks unreasonable heap growth. */
                 WARN("Needed to allocate blacklisted block at 0x%lx\n",                  if (++GC_large_alloc_warn_suppressed
                      (word)hbp);                      >= GC_large_alloc_warn_interval) {
                     WARN("Repeated allocation of very large block "
                          "(appr. size %ld):\n"
                          "\tMay lead to memory leak and poor performance.\n",
                          size_needed);
                     GC_large_alloc_warn_suppressed = 0;
                   }
                 size_avail = orig_avail;                  size_avail = orig_avail;
               } else if (size_avail == 0 && size_needed == HBLKSIZE                } else if (size_avail == 0 && size_needed == HBLKSIZE
                          && IS_MAPPED(hhdr)) {                           && IS_MAPPED(hhdr)) {
 #               ifndef FIND_LEAK                  if (!GC_find_leak) {
                   static unsigned count = 0;                    static unsigned count = 0;
   
                   /* The block is completely blacklisted.  We need      */                    /* The block is completely blacklisted.  We need      */
Line 597  int n;
Line 696  int n;
                       GC_large_free_bytes -= total_size;                        GC_large_free_bytes -= total_size;
                       GC_remove_from_fl(hhdr, n);                        GC_remove_from_fl(hhdr, n);
                       for (h = hbp; h < limit; h++) {                        for (h = hbp; h < limit; h++) {
                         if (h == hbp || GC_install_header(h)) {                          if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                           hhdr = HDR(h);  
                           (void) setup_header(                            (void) setup_header(
                                   hhdr,                                    hhdr,
                                   BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),                                    BYTES_TO_WORDS(HBLKSIZE),
                                  PTRFREE, 0); /* Can't fail */                            PTRFREE, 0); /* Can't fail */
                           if (GC_debugging_started) {                            if (GC_debugging_started) {
                             BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);                              BZERO(h, HBLKSIZE);
                           }                            }
                         }                          }
                       }                        }
Line 615  int n;
Line 713  int n;
                       }                        }
                       hhdr = HDR(hbp);                        hhdr = HDR(hbp);
                   }                    }
 #               endif                  }
               }                }
             }              }
             if( size_avail >= size_needed ) {              if( size_avail >= size_needed ) {
 #               ifdef USE_MUNMAP  #               ifdef USE_MUNMAP
                   if (!IS_MAPPED(hhdr)) {                    if (!IS_MAPPED(hhdr)) {
                     GC_remap((ptr_t)hbp, size_avail);                      GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                     hhdr -> hb_flags &= ~WAS_UNMAPPED;                      hhdr -> hb_flags &= ~WAS_UNMAPPED;
                   }                    }
 #               endif  #               endif
Line 634  int n;
Line 732  int n;
   
     if (0 == hbp) return 0;      if (0 == hbp) return 0;
   
     /* Notify virtual dirty bit implementation that we are about to write. */  
         GC_write_hint(hbp);  
   
     /* Add it to map of valid blocks */      /* Add it to map of valid blocks */
         if (!GC_install_counts(hbp, (word)size_needed)) return(0);          if (!GC_install_counts(hbp, (word)size_needed)) return(0);
         /* This leaks memory under very rare conditions. */          /* This leaks memory under very rare conditions. */
Line 646  int n;
Line 741  int n;
             GC_remove_counts(hbp, (word)size_needed);              GC_remove_counts(hbp, (word)size_needed);
             return(0); /* ditto */              return(0); /* ditto */
         }          }
   
     /* Clear block if necessary */  
         if (GC_debugging_started  
             || sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {  
             BZERO(hbp + HDR_BYTES,  size_needed - HDR_BYTES);  
         }  
   
       /* Notify virtual dirty bit implementation that we are about to write.  */
       /* Ensure that pointerfree objects are not protected if it's avoidable. */
           GC_remove_protection(hbp, divHBLKSZ(size_needed),
                                (hhdr -> hb_descr == 0) /* pointer-free */);
   
     /* We just successfully allocated a block.  Restart count of        */      /* We just successfully allocated a block.  Restart count of        */
     /* consecutive failures.                                            */      /* consecutive failures.                                            */
     {      {
Line 685  hdr *hhdr, *prevhdr, *nexthdr;
Line 779  hdr *hhdr, *prevhdr, *nexthdr;
 signed_word size;  signed_word size;
   
   
     hhdr = HDR(hbp);      GET_HDR(hbp, hhdr);
     size = hhdr->hb_sz;      size = hhdr->hb_sz;
     size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);      size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
     GC_remove_counts(hbp, (word)size);      GC_remove_counts(hbp, (word)size);
Line 695  signed_word size;
Line 789  signed_word size;
       if (HBLK_IS_FREE(hhdr)) {        if (HBLK_IS_FREE(hhdr)) {
         GC_printf1("Duplicate large block deallocation of 0x%lx\n",          GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                    (unsigned long) hbp);                     (unsigned long) hbp);
           ABORT("Duplicate large block deallocation");
       }        }
   
     GC_ASSERT(IS_MAPPED(hhdr));      GC_ASSERT(IS_MAPPED(hhdr));
     GC_invalidate_map(hhdr);      GC_invalidate_map(hhdr);
     next = (struct hblk *)((word)hbp + size);      next = (struct hblk *)((word)hbp + size);
     nexthdr = HDR(next);      GET_HDR(next, nexthdr);
     prev = GC_free_block_ending_at(hbp);      prev = GC_free_block_ending_at(hbp);
     /* Coalesce with successor, if possible */      /* Coalesce with successor, if possible */
       if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {        if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {

Legend: left column shows text removed from v.1.1.1.1, right column shows text added in v.1.6; changed lines appear in both columns.
