version 1.3, 2001/04/20 07:39:19 |
version 1.6, 2003/06/24 05:11:33 |
|
|
} |
} |
} |
} |
|
|
# if defined(REDIRECT_MALLOC) || defined(REDIRECT_REALLOC)

/* If malloc is redirected to the collector but no explicit realloc	*/
/* replacement was requested, fall back to GC_realloc so that realloc	*/
/* keeps working on objects obtained from the redirected malloc.	*/
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

/* Replacement for the C library realloc: forwards to whatever		*/
/* REDIRECT_REALLOC expands to (GC_realloc unless overridden above).	*/
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p, lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(REDIRECT_REALLOC(p, lb));
}
# endif /* REDIRECT_MALLOC || REDIRECT_REALLOC */
|
|
|
|
/* The same thing, except caller does not hold allocation lock. */ |
/* The same thing, except caller does not hold allocation lock. */ |
|
|
lw = ROUNDED_UP_WORDS(lb); |
lw = ROUNDED_UP_WORDS(lb); |
n_blocks = OBJ_SZ_TO_BLOCKS(lw); |
n_blocks = OBJ_SZ_TO_BLOCKS(lw); |
init = GC_obj_kinds[k].ok_init; |
init = GC_obj_kinds[k].ok_init; |
|
if (GC_have_errors) GC_print_all_errors(); |
GC_INVOKE_FINALIZERS(); |
GC_INVOKE_FINALIZERS(); |
DISABLE_SIGNALS(); |
DISABLE_SIGNALS(); |
LOCK(); |
LOCK(); |
Line 286 register struct obj_kind * kind = GC_obj_kinds + k; |
|
Line 287 register struct obj_kind * kind = GC_obj_kinds + k; |
|
register ptr_t op; |
register ptr_t op; |
DCL_LOCK_STATE; |
DCL_LOCK_STATE; |
|
|
|
if (GC_have_errors) GC_print_all_errors(); |
GC_INVOKE_FINALIZERS(); |
GC_INVOKE_FINALIZERS(); |
DISABLE_SIGNALS(); |
DISABLE_SIGNALS(); |
LOCK(); |
LOCK(); |
Line 323 extern ptr_t GC_reclaim_generic(); |
|
Line 325 extern ptr_t GC_reclaim_generic(); |
|
/* GC_malloc_many or friends to replenish it. (We do not round up */ |
/* GC_malloc_many or friends to replenish it. (We do not round up */ |
/* object sizes, since a call indicates the intention to consume many */ |
/* object sizes, since a call indicates the intention to consume many */ |
/* objects of exactly this size.) */ |
/* objects of exactly this size.) */ |
|
/* We return the free-list by assigning it to *result, since it is */ |
|
/* not safe to return, e.g. a linked list of pointer-free objects, */ |
|
/* since the collector would not retain the entire list if it were */ |
|
/* invoked just as we were returning. */ |
/* Note that the client should usually clear the link field. */ |
/* Note that the client should usually clear the link field. */ |
ptr_t GC_generic_malloc_many(lb, k) |
void GC_generic_malloc_many(lb, k, result) |
register word lb; |
register word lb; |
register int k; |
register int k; |
|
ptr_t *result; |
{ |
{ |
ptr_t op; |
ptr_t op; |
ptr_t p; |
ptr_t p; |
|
|
if (!SMALL_OBJ(lb)) { |
if (!SMALL_OBJ(lb)) { |
op = GC_generic_malloc(lb, k); |
op = GC_generic_malloc(lb, k); |
if(0 != op) obj_link(op) = 0; |
if(0 != op) obj_link(op) = 0; |
return(op); |
*result = op; |
|
return; |
} |
} |
lw = ALIGNED_WORDS(lb); |
lw = ALIGNED_WORDS(lb); |
|
if (GC_have_errors) GC_print_all_errors(); |
GC_INVOKE_FINALIZERS(); |
GC_INVOKE_FINALIZERS(); |
DISABLE_SIGNALS(); |
DISABLE_SIGNALS(); |
LOCK(); |
LOCK(); |
if (!GC_is_initialized) GC_init_inner(); |
if (!GC_is_initialized) GC_init_inner(); |
|
/* Do our share of marking work */ |
|
if (GC_incremental && !GC_dont_gc) { |
|
ENTER_GC(); |
|
GC_collect_a_little_inner(1); |
|
EXIT_GC(); |
|
} |
/* First see if we can reclaim a page of objects waiting to be */ |
/* First see if we can reclaim a page of objects waiting to be */ |
/* reclaimed. */ |
/* reclaimed. */ |
{ |
{ |
|
|
while ((hbp = *rlh) != 0) { |
while ((hbp = *rlh) != 0) { |
hhdr = HDR(hbp); |
hhdr = HDR(hbp); |
*rlh = hhdr -> hb_next; |
*rlh = hhdr -> hb_next; |
|
hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no; |
# ifdef PARALLEL_MARK |
# ifdef PARALLEL_MARK |
{ |
{ |
signed_word my_words_allocd_tmp = GC_words_allocd_tmp; |
signed_word my_words_allocd_tmp = GC_words_allocd_tmp; |
|
|
GC_mem_found += my_words_allocd; |
GC_mem_found += my_words_allocd; |
# endif |
# endif |
# ifdef PARALLEL_MARK |
# ifdef PARALLEL_MARK |
|
*result = op; |
(void)GC_atomic_add( |
(void)GC_atomic_add( |
(volatile GC_word *)(&GC_words_allocd_tmp), |
(volatile GC_word *)(&GC_words_allocd_tmp), |
(GC_word)(my_words_allocd)); |
(GC_word)(my_words_allocd)); |
|
|
-- GC_fl_builder_count; |
-- GC_fl_builder_count; |
if (GC_fl_builder_count == 0) GC_notify_all_builder(); |
if (GC_fl_builder_count == 0) GC_notify_all_builder(); |
GC_release_mark_lock(); |
GC_release_mark_lock(); |
return GC_clear_stack(op); |
(void) GC_clear_stack(0); |
|
return; |
# else |
# else |
GC_words_allocd += my_words_allocd; |
GC_words_allocd += my_words_allocd; |
goto out; |
goto out; |
|
|
|
|
op = GC_build_fl(h, lw, ok -> ok_init, 0); |
op = GC_build_fl(h, lw, ok -> ok_init, 0); |
# ifdef PARALLEL_MARK |
# ifdef PARALLEL_MARK |
|
*result = op; |
GC_acquire_mark_lock(); |
GC_acquire_mark_lock(); |
-- GC_fl_builder_count; |
-- GC_fl_builder_count; |
if (GC_fl_builder_count == 0) GC_notify_all_builder(); |
if (GC_fl_builder_count == 0) GC_notify_all_builder(); |
GC_release_mark_lock(); |
GC_release_mark_lock(); |
return GC_clear_stack(op); |
(void) GC_clear_stack(0); |
|
return; |
# else |
# else |
goto out; |
goto out; |
# endif |
# endif |
|
|
if (0 != op) obj_link(op) = 0; |
if (0 != op) obj_link(op) = 0; |
|
|
out: |
out: |
|
*result = op; |
UNLOCK(); |
UNLOCK(); |
ENABLE_SIGNALS(); |
ENABLE_SIGNALS(); |
return(GC_clear_stack(op)); |
(void) GC_clear_stack(0); |
} |
} |
|
|
GC_PTR GC_malloc_many(size_t lb) |
GC_PTR GC_malloc_many(size_t lb) |
{ |
{ |
return(GC_generic_malloc_many(lb, NORMAL)); |
ptr_t result; |
|
GC_generic_malloc_many(lb, NORMAL, &result); |
|
return result; |
} |
} |
|
|
/* Note that the "atomic" version of this would be unsafe, since the */ |
/* Note that the "atomic" version of this would be unsafe, since the */ |
|
|
return((GC_PTR) op); |
return((GC_PTR) op); |
} |
} |
} |
} |
|
|
|
#ifdef __STDC__ |
|
/* Not well tested nor integrated. */ |
|
/* Debug version is tricky and currently missing. */ |
|
#include <limits.h> |
|
|
|
GC_PTR GC_memalign(size_t align, size_t lb) |
|
{ |
|
size_t new_lb; |
|
size_t offset; |
|
ptr_t result; |
|
|
|
# ifdef ALIGN_DOUBLE |
|
if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb); |
|
# endif |
|
if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb); |
|
if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) { |
|
if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */; |
|
return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb); |
|
/* Will be HBLKSIZE aligned. */ |
|
} |
|
/* We could also try to make sure that the real rounded-up object size */ |
|
/* is a multiple of align. That would be correct up to HBLKSIZE. */ |
|
new_lb = lb + align - 1; |
|
result = GC_malloc(new_lb); |
|
offset = (word)result % align; |
|
if (offset != 0) { |
|
offset = align - offset; |
|
if (!GC_all_interior_pointers) { |
|
if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE); |
|
GC_register_displacement(offset); |
|
} |
|
} |
|
result = (GC_PTR) ((ptr_t)result + offset); |
|
GC_ASSERT((word)result % align == 0); |
|
return result; |
|
} |
|
#endif |
|
|
# ifdef ATOMIC_UNCOLLECTABLE |
# ifdef ATOMIC_UNCOLLECTABLE |
/* Allocate lb bytes of pointerfree, untraced, uncollectable data */ |
/* Allocate lb bytes of pointerfree, untraced, uncollectable data */ |