/* OpenXM_contrib2/asir2000/gc/mallocx.c, Revision 1.5 */
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();          /* in misc.c, behaves like identity */
void GC_extend_size_map();              /* in misc.c */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
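
/* As an illustration only (hypothetical helper, not part of the API):   */
/* an inline allocator built on these pointers would, while holding the  */
/* allocation lock, pop the head of the appropriate free list, hand out  */
/* a cleared link word, and credit the allocation count (here via        */
/* GC_incr_words_allocd, defined below), falling back to the regular     */
/* allocator when the list is empty.                                     */
#if 0
static void * example_quick_alloc_words(size_t lw)
{
    void *op = GC_objfreelist_ptr[lw];      /* head of NORMAL free list for lw words */

    if (op == 0) return 0;                  /* empty: caller falls back to GC_malloc */
    GC_objfreelist_ptr[lw] = *(void **)op;  /* unlink the head; link is the first word */
    *(void **)op = 0;                       /* client expects a cleared link field */
    GC_incr_words_allocd(lw);               /* keep allocation statistics consistent */
    return op;
}
#endif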


GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#     ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#     endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes       */
register word orig_sz;   /* Original sz in bytes        */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = BYTES_TO_WORDS(sz);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              GC_PTR result =
                  GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this       */
                  /* gives the client warning of imminent disaster.     */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          GC_PTR result =
              GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
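
/* Illustrative use only (not part of the collector): growing a         */
/* collected buffer with GC_realloc.  The object may move, so only the  */
/* returned pointer is used afterwards; GC_realloc(0, n) behaves like   */
/* GC_malloc(n), and a zero result leaves the original object intact.   */
#if 0
static void example_grow_buffer(void)
{
    char *buf = (char *)GC_malloc(100);
    char *bigger = (char *)GC_realloc(buf, 1000);

    if (bigger != 0) {
        buf = bigger;   /* old pointer must not be reused; block may have moved */
    }
    /* On failure (bigger == 0) the original 100-byte object is untouched. */
}
#endif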

# if defined(REDIRECT_MALLOC) || defined(REDIRECT_REALLOC)
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
  {
#   ifdef REDIRECT_REALLOC
      return(REDIRECT_REALLOC(p, lb));
#   else
      return(GC_realloc(p, lb));
#   endif
  }
# endif /* REDIRECT_MALLOC || REDIRECT_REALLOC */


/* The same thing, except caller does not hold allocation lock.        */
/* We avoid holding allocation lock while we clear memory.             */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
register ptr_t result;
word lw;
word n_blocks;
GC_bool init;
DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
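
/* Illustrative use only (not part of the collector): a large,          */
/* pointer-free buffer for which the client promises to keep a pointer  */
/* to (or near) its beginning.  That promise is what lets the collector */
/* ignore interior pointers far from the start of the object.           */
#if 0
static void example_large_buffer(void)
{
    double *samples = (double *)GC_malloc_atomic_ignore_off_page(
                                    1000000 * sizeof(double));

    if (samples != 0) {
        samples[0] = 0.0;   /* 'samples' itself must stay live while the buffer is in use */
    }
}
#endif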

/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.                                           */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and        */
/* bypasses the MERGE_SIZES mechanism.  Used by gc_inline.h.  The      */
/* caller must hold the allocation lock.                               */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}

/* As above (small object size, MERGE_SIZES bypassed), but this        */
/* version acquires the allocation lock itself.  Used by gc_inline.h.  */
#ifdef __STDC__
ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
ptr_t GC_generic_malloc_words_small(lw, k)
register word lw;
register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We return the free list by assigning it to *result, since it is not  */
/* safe to return, e.g., a linked list of pointer-free objects as a     */
/* function result: the collector would not retain the entire list if   */
/* it were invoked just as we were returning.                           */
/* Note that the client should usually clear the link field.            */
/* (An illustrative usage sketch follows GC_malloc_many below.)         */
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                      */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
            {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.     */
                /* Thus we can't accidentally adjust it down in more   */
                /* than one thread simultaneously.                     */
                if (my_words_allocd_tmp != 0) {
                    (void)GC_atomic_add(
                        (volatile GC_word *)(&GC_words_allocd_tmp),
                        (GC_word)(-my_words_allocd_tmp));
                    GC_words_allocd += my_words_allocd_tmp;
                }
            }
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#               ifdef NEED_TO_COUNT
                  /* We are neither gathering statistics, nor marking in */
                  /* parallel.  Thus GC_reclaim_generic doesn't count    */
                  /* for us.                                             */
                  for (p = op; p != 0; p = obj_link(p)) {
                      my_words_allocd += lw;
                  }
#               endif
#               if defined(GATHERSTATS)
                  /* We also reclaimed memory, so we need to adjust      */
                  /* that count.                                         */
                  /* This ought to be done atomically; since it is not,  */
                  /* the result may be inaccurate.                       */
                  GC_mem_found += my_words_allocd;
#               endif
#               ifdef PARALLEL_MARK
                  *result = op;
                  (void)GC_atomic_add(
                        (volatile GC_word *)(&GC_words_allocd_tmp),
                        (GC_word)(my_words_allocd));
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
                  GC_release_mark_lock();
                  (void) GC_clear_stack(0);
                  return;
#               else
                  GC_words_allocd += my_words_allocd;
                  goto out;
#               endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* The GC lock is needed for reclaim list access.  We     */
              /* must decrement fl_builder_count before reacquiring the */
              /* GC lock.  Hopefully this path is rare.                 */
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                          */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#         endif

          op = GC_build_fl(h, lw, ok -> ok_init, 0);
#         ifdef PARALLEL_MARK
            *result = op;
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
            return;
#         else
            goto out;
#         endif
        }
    }

    /* As a last attempt, try allocating a single object.  Note that   */
    /* this may trigger a collection or expand the heap.               */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}

GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
# endif /* THREADS && !SRC_M3 */
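
/* Illustrative use only (hypothetical thread-local cache, not part of  */
/* the collector): replenish a per-thread free list with a single call, */
/* then hand out objects one at a time.  Objects are linked through     */
/* their first word, and the link word is cleared before use.           */
#if 0
static void *example_free_list = 0;     /* would live in thread-local storage */

static void * example_alloc_16(void)
{
    void *p = example_free_list;

    if (p == 0) {
        p = GC_malloc_many(16);         /* one lock acquisition for many objects */
        if (p == 0) return 0;
    }
    example_free_list = *(void **)p;    /* advance to the next object on the list */
    *(void **)p = 0;                    /* clear the link word before handing out */
    return p;
}
#endif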

/* Allocate lb bytes of pointerful, traced, but not collectable data.  */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
              /* We don't need the extra byte, since this won't be     */
              /* collected anyway.                                     */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.                           */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
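
/* Illustrative use only (not part of the collector): an uncollectable  */
/* object is traced (pointers stored in it keep collected objects       */
/* alive) but is never reclaimed automatically, so it must be released  */
/* explicitly with GC_free.                                              */
#if 0
static void example_uncollectable_root(void)
{
    void **root = (void **)GC_malloc_uncollectable(sizeof(void *));

    if (root != 0) {
        *root = GC_malloc(128);   /* kept live because *root is traced */
        /* ... use the object ... */
        GC_free(root);            /* never reclaimed automatically */
    }
}
#endif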

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data.     */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
              /* We don't need the extra byte, since this won't be     */
              /* collected anyway.                                     */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.                           */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */