libgc: update libgc to commit 6d372272 (#21822)

This commit is contained in:
Kim Shrier 2024-08-04 12:16:08 -06:00 committed by GitHub
parent 44d2647e2d
commit 49f5ebf717
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 4992 additions and 4651 deletions

7729
thirdparty/libgc/gc.c vendored

File diff suppressed because it is too large Load diff

View file

@ -1,2 +1,6 @@
/* This file is installed for backward compatibility. */
#include "gc/gc.h"

/* Weak fallback definition of GC_noop1_ptr for clients linked against  */
/* an older libgc that does not export it.  Marked weak so that the     */
/* real definition in the library (if present) takes precedence.        */
__attribute__ ((weak)) GC_API void GC_CALL GC_noop1_ptr(volatile void *p) {
  /* Forward to GC_noop1, which takes a GC_word (see gc.h).  The cast   */
  /* through GC_word is the portable pointer-to-integer conversion;     */
  /* the previous (u64) cast relied on a typedef not defined in C.      */
  GC_noop1((GC_word)p);
}

View file

@ -58,6 +58,24 @@ typedef GC_SIGNEDWORD GC_signed_word;
#undef GC_SIGNEDWORD
#undef GC_UNSIGNEDWORD
#if (defined(_UINTPTR_T) || defined(_UINTPTR_T_DEFINED)) \
&& !defined(__MSYS__)
/* Note: MSYS2 might provide __uintptr_t instead of uintptr_t. */
typedef uintptr_t GC_uintptr_t;
#else
typedef GC_word GC_uintptr_t;
#endif
/* Does the first pointer have a smaller address than the second one? */
/* arguments should be of the same pointer type, e.g. of char* type. */
/* Ancient compilers might treat a pointer as a signed value, thus we */
/* need a cast to unsigned word of each compared pointer. */
#if defined(__GNUC__)
# define GC_ADDR_LT(p,q) ((p) < (q))
#else
# define GC_ADDR_LT(p,q) ((GC_word)(p) < (GC_word)(q))
#endif
/* Get the GC library version. The returned value is a constant in the */
/* form: ((version_major<<16) | (version_minor<<8) | version_micro). */
GC_API GC_VERSION_VAL_T GC_CALL GC_get_version(void);
@ -186,16 +204,17 @@ GC_API GC_ATTR_DEPRECATED int GC_all_interior_pointers;
/* not be changed after GC initialization (in */
/* case of calling it after the GC is */
/* initialized, the setter acquires the */
/* allocator lock. The initial value depends */
/* on whether the GC is built with */
/* ALL_INTERIOR_POINTERS macro defined or not. */
/* Unless DONT_ADD_BYTE_AT_END is defined, this */
/* also affects whether sizes are increased by */
/* at least a byte to allow "off the end" */
/* pointer recognition (but the size is not */
/* increased for uncollectible objects as well */
/* as for ignore-off-page objects of at least */
/* heap block size). Must be only 0 or 1. */
/* allocator lock. Must be only 0 or 1. The */
/* initial value depends on whether the GC is */
/* built with ALL_INTERIOR_POINTERS macro */
/* defined or not. This also affects, unless */
/* GC_get_dont_add_byte_at_end() returns */
/* a non-zero value, whether the object sizes */
/* are increased by at least a byte to allow */
/* "off the end" pointer recognition (but the */
/* size is not increased for uncollectible */
/* objects as well as for ignore-off-page */
/* objects of at least heap block size). */
GC_API void GC_CALL GC_set_all_interior_pointers(int);
GC_API int GC_CALL GC_get_all_interior_pointers(void);
@ -605,6 +624,19 @@ GC_API int GC_CALL GC_posix_memalign(void ** /* memptr */, size_t /* align */,
/* GC_free(0) is a no-op, as required by ANSI C for free. */
GC_API void GC_CALL GC_free(void *);
/* A symbol to be intercepted by heap profilers so that they can */
/* accurately track allocations. Programs such as Valgrind massif */
/* and KDE heaptrack do tracking of allocated objects by overriding */
/* common allocator methods (e.g. malloc and free). However, because */
/* the collector does not work by calling standard allocation methods */
/* on objects that were reclaimed, we need a way to tell the profiler */
/* that an object has been freed. This function is not intended to */
/* be called by the client; it should be used for the interception */
/* purpose only. The collector calls this function internally whenever */
/* an object is freed. Defined only if the library has been compiled */
/* with VALGRIND_TRACKING. */
GC_API void GC_CALLBACK GC_free_profiler_hook(void *);
/* The "stubborn" objects allocation is not supported anymore. Exists */
/* only for the backward compatibility. */
#define GC_MALLOC_STUBBORN(sz) GC_MALLOC(sz)
@ -640,9 +672,11 @@ GC_API void * GC_CALL GC_base(void * /* displaced_pointer */);
GC_API int GC_CALL GC_is_heap_ptr(const void *);
/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally */
/* requested. */
GC_API size_t GC_CALL GC_size(const void * /* obj_addr */) GC_ATTR_NONNULL(1);
/* (For small objects this also happens to work from interior pointers, */
/* but that should not be relied upon.) The returned size may be */
/* slightly larger than what was originally requested. The argument */
/* may be NULL (causing 0 to be returned). */
GC_API size_t GC_CALL GC_size(const void * /* obj_addr */);
/* For compatibility with C library. This is occasionally faster than */
/* a malloc followed by a bcopy. But if you rely on that, either here */
@ -885,9 +919,10 @@ GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s *,
/* typically to avoid data race on multiprocessors. */
GC_API size_t GC_CALL GC_get_size_map_at(int i);
/* Count total memory use (in bytes) by all allocated blocks. Acquires */
/* the allocator lock in the reader mode. */
GC_API size_t GC_CALL GC_get_memory_use(void);
/* Return the total memory use (in bytes) by all allocated blocks. */
/* The result is equal to GC_get_heap_size() - GC_get_free_bytes(). */
/* Acquires the allocator lock in the reader mode. */
GC_API GC_word GC_CALL GC_get_memory_use(void);
/* Disable garbage collection. Even GC_gcollect calls will be */
/* ineffective. */
@ -1002,9 +1037,24 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_malloc_atomic_ignore_off_page(size_t /* lb */);
#if (defined(GC_CAN_SAVE_CALL_STACKS) || defined(GC_ADD_CALLER)) \
&& !defined(GC_RETURN_ADDR_T_DEFINED)
/* A type to hold a function return address (pointer). Never used */
/* for calling a function. */
# if defined(__GNUC__)
/* Define it as a data (object) pointer type to avoid the compiler */
/* complaining that ISO C forbids conversion between object and */
/* function pointer types. */
typedef void *GC_return_addr_t;
# else
typedef void (*GC_return_addr_t)(void);
# endif
# define GC_RETURN_ADDR_T_DEFINED
#endif /* GC_CAN_SAVE_CALL_STACKS || GC_ADD_CALLER */
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
# define GC_EXTRA_PARAMS GC_word ra, const char * s, int i
# define GC_EXTRA_PARAMS GC_return_addr_t ra, const char * s, int i
#else
# define GC_EXTRAS __FILE__, __LINE__
# define GC_EXTRA_PARAMS const char * s, int i
@ -1065,15 +1115,21 @@ GC_API /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2) void * GC_CALL
GC_debug_realloc_replacement(void * /* object_addr */,
size_t /* size_in_bytes */);
#ifdef __cplusplus
# define GC_CAST_AWAY_CONST_PVOID(p) \
reinterpret_cast</* no const */ void *>(reinterpret_cast<GC_uintptr_t>(p))
#else
# define GC_CAST_AWAY_CONST_PVOID(p) ((/* no const */ void *)(GC_uintptr_t)(p))
#endif
/* Convenient macros for disappearing links registration working both */
/* for debug and non-debug allocated objects, and accepting interior */
/* pointers to object. */
#define GC_GENERAL_REGISTER_DISAPPEARING_LINK_SAFE(link, obj) \
GC_general_register_disappearing_link(link, \
GC_base((/* no const */ void *)(GC_word)(obj)))
GC_base(GC_CAST_AWAY_CONST_PVOID(obj)))
#define GC_REGISTER_LONG_LINK_SAFE(link, obj) \
GC_register_long_link(link, \
GC_base((/* no const */ void *)(GC_word)(obj)))
GC_register_long_link(link, GC_base(GC_CAST_AWAY_CONST_PVOID(obj)))
#ifdef GC_DEBUG_REPLACEMENT
# define GC_MALLOC(sz) GC_debug_malloc_replacement(sz)
@ -1329,7 +1385,7 @@ GC_API int GC_CALL GC_general_register_disappearing_link(void ** /* link */,
/* with) are ignored. This was added after a long */
/* email discussion with John Ellis. */
/* link must be non-NULL (and be properly aligned). */
/* obj must be a pointer to the first word of an object */
/* obj must be a pointer to the beginning of an object */
/* allocated by GC_malloc or friends. A link */
/* disappears when it is unregistered manually, or when */
/* (*link) is cleared, or when the object containing */
@ -1475,40 +1531,50 @@ GC_API int GC_CALL GC_invoke_finalizers(void);
/* associated external resource is still in use. */
/* The function is sometimes called keep_alive in other */
/* settings. */
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) \
&& !(defined(__APPLE__) && defined(__arm__) && defined(__TINYC__))
/* TCC (as of v0.9.28rc) does not support asm on macOS/arm. */
# if defined(__e2k__)
# define GC_reachable_here(ptr) \
__asm__ __volatile__ (" " : : "r"(ptr) : "memory")
# elif defined(__TINYC__)
# define GC_reachable_here(ptr) \
__asm__ __volatile__ (" " : : "g"(ptr) : "memory")
# else
# define GC_reachable_here(ptr) \
__asm__ __volatile__ (" " : : "X"(ptr) : "memory")
# endif
#elif defined(LINT2)
# define GC_reachable_here(ptr) GC_noop1(~(GC_word)(ptr)^(~(GC_word)0))
/* The expression matches the one of COVERT_DATAFLOW(). */
/* The expression is similar to that of COVERT_DATAFLOW(). */
#else
# define GC_reachable_here(ptr) GC_noop1((GC_word)(ptr))
# define GC_reachable_here(ptr) GC_noop1_ptr(GC_CAST_AWAY_CONST_PVOID(ptr))
#endif
/* Make the argument appear live to compiler. Should be robust against */
/* the whole program analysis. */
/* Make the argument of word type appear live to compiler. This could */
/* be used to prevent certain compiler false positive (FP) warnings and */
/* misoptimizations. Should be robust against the whole program */
/* analysis. */
GC_API void GC_CALL GC_noop1(GC_word);
/* Same as GC_noop1() but for a pointer. */
GC_API void GC_CALL GC_noop1_ptr(volatile void *);
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer. msg is printf format string (arg must */
/* match the format). Both the setter and the getter acquire the */
/* allocator lock (in the reader mode in case of the getter) to avoid */
/* data race. In GC v7.1 and before, the setter returned the old */
/* warn_proc value. */
typedef void (GC_CALLBACK * GC_warn_proc)(char * /* msg */,
GC_word /* arg */);
/* data race. In GC v7.1 and before: the setter returned the value of */
/* old warn_proc. In GC v8.2.x and before: msg pointer type had no */
/* const qualifier. */
typedef void (GC_CALLBACK * GC_warn_proc)(const char * /* msg */,
GC_uintptr_t /* arg */);
GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc /* p */) GC_ATTR_NONNULL(1);
/* GC_get_warn_proc returns the current warn_proc. */
GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void);
/* GC_ignore_warn_proc may be used as an argument for GC_set_warn_proc */
/* to suppress all warnings (unless statistics printing is turned on). */
GC_API void GC_CALLBACK GC_ignore_warn_proc(char *, GC_word);
GC_API void GC_CALLBACK GC_ignore_warn_proc(const char *, GC_uintptr_t);
/* Change file descriptor of GC log. Unavailable on some targets. */
GC_API void GC_CALL GC_set_log_fd(int);
@ -1545,7 +1611,7 @@ GC_API void GC_CALL GC_abort_on_oom(void);
/* a race with the collector (e.g., one thread might fetch hidden link */
/* value, while another thread might collect the relevant object and */
/* reuse the free space for another object). */
typedef GC_word GC_hidden_pointer;
typedef GC_uintptr_t GC_hidden_pointer;
#define GC_HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
#define GC_REVEAL_POINTER(p) ((void *)GC_HIDE_POINTER(p))
@ -1716,9 +1782,9 @@ GC_API void GC_CALL GC_start_mark_threads(void);
/* while it is on an alt-stack. Acquires the allocator lock in the */
/* reader mode. */
GC_API void GC_CALL GC_register_altstack(void * /* normstack */,
GC_word /* normstack_size */,
size_t /* normstack_size */,
void * /* altstack */,
GC_word /* altstack_size */);
size_t /* altstack_size */);
/* Unregister the current thread. Only an explicitly registered */
/* thread (i.e. for which GC_register_my_thread() returns GC_SUCCESS) */
@ -1900,11 +1966,11 @@ GC_API void GC_CALL GC_dump_finalization(void);
/* Note that GC_PTR_ADD evaluates the first argument more than once. */
#if defined(GC_DEBUG) && (defined(__GNUC__) || defined(__clang__))
# define GC_PTR_ADD3(x, n, type_of_result) \
((type_of_result)GC_same_obj((x)+(n), (x)))
((type_of_result)GC_same_obj((x) + (n), (x)))
# define GC_PRE_INCR3(x, n, type_of_result) \
((type_of_result)GC_pre_incr((void **)(&(x)), (n)*sizeof(*x)))
((type_of_result)GC_pre_incr((void **)&(x), (n) * sizeof(*(x))))
# define GC_POST_INCR3(x, n, type_of_result) \
((type_of_result)GC_post_incr((void **)(&(x)), (n)*sizeof(*x)))
((type_of_result)GC_post_incr((void **)&(x), (n) * sizeof(*(x))))
# define GC_PTR_ADD(x, n) GC_PTR_ADD3(x, n, __typeof__(x))
# define GC_PRE_INCR(x, n) GC_PRE_INCR3(x, n, __typeof__(x))
# define GC_POST_INCR(x) GC_POST_INCR3(x, 1, __typeof__(x))
@ -1913,7 +1979,7 @@ GC_API void GC_CALL GC_dump_finalization(void);
/* We can't do this right without typeof, which ANSI decided was not */
/* sufficiently useful. Without it we resort to the non-debug version. */
/* TODO: This should eventually support C++0x decltype. */
# define GC_PTR_ADD(x, n) ((x)+(n))
# define GC_PTR_ADD(x, n) ((x) + (n))
# define GC_PRE_INCR(x, n) ((x) += (n))
# define GC_POST_INCR(x) ((x)++)
# define GC_POST_DECR(x) ((x)--)
@ -1948,12 +2014,15 @@ GC_API void GC_CALL GC_debug_ptr_store_and_dirty(void * /* p */,
# endif
#endif
/* This returns a list of objects, linked through their first word. */
/* Its use can greatly reduce lock contention problems, since the */
/* allocator lock can be acquired and released many fewer times. */
/* This returns a list of objects with the link pointer located at the */
/* beginning of each object. The use of such list can greatly reduce */
/* lock contention problems, since the allocator lock can be acquired */
/* and released many fewer times. Note that there is no "atomic" */
/* version of this function, as otherwise the links would not be seen */
/* by the collector. If the argument is zero, then it is treated as 1. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t /* lb */);
#define GC_NEXT(p) (*(void * *)(p)) /* Retrieve the next element */
/* in returned list. */
/* in the returned list. */
/* A filter function to control the scanning of dynamic libraries. */
/* If implemented, called by GC before registering a dynamic library */
@ -2022,13 +2091,6 @@ GC_API void GC_CALL GC_register_has_static_roots_callback(
# endif
# endif
# if !defined(_UINTPTR_T) && !defined(_UINTPTR_T_DEFINED) \
&& !defined(UINTPTR_MAX)
typedef GC_word GC_uintptr_t;
# else
typedef uintptr_t GC_uintptr_t;
# endif
# ifdef _WIN64
# define GC_WIN32_SIZE_T GC_uintptr_t
# elif defined(GC_WINDOWS_H_INCLUDED)
@ -2138,15 +2200,17 @@ GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void);
/* Cygwin/x64 does not add leading underscore to symbols anymore. */
extern int __data_start__[], __data_end__[];
extern int __bss_start__[], __bss_end__[];
# define GC_DATASTART ((GC_word)__data_start__ < (GC_word)__bss_start__ \
# define GC_DATASTART (GC_ADDR_LT((char *)__data_start__, \
(char *)__bss_start__) \
? (void *)__data_start__ : (void *)__bss_start__)
# define GC_DATAEND ((GC_word)__data_end__ > (GC_word)__bss_end__ \
# define GC_DATAEND (GC_ADDR_LT((char *)__bss_end__, (char *)__data_end__) \
? (void *)__data_end__ : (void *)__bss_end__)
# else
extern int _data_start__[], _data_end__[], _bss_start__[], _bss_end__[];
# define GC_DATASTART ((GC_word)_data_start__ < (GC_word)_bss_start__ \
# define GC_DATASTART (GC_ADDR_LT((char *)_data_start__, \
(char *)_bss_start__) \
? (void *)_data_start__ : (void *)_bss_start__)
# define GC_DATAEND ((GC_word)_data_end__ > (GC_word)_bss_end__ \
# define GC_DATAEND (GC_ADDR_LT((char *)_bss_end__, (char *)_data_end__) \
? (void *)_data_end__ : (void *)_bss_end__)
# endif /* !__x86_64__ */
# define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART, GC_DATAEND); \

View file

@ -45,6 +45,18 @@
#endif
#define GC_UNSIGNEDWORD unsigned GC_SIGNEDWORD
/* Size of a pointer in bytes. */
#if defined(__SIZEOF_POINTER__)
# define GC_SIZEOF_PTR __SIZEOF_POINTER__
#elif defined(__LP64__) || defined (_LP64) || defined(_WIN64) \
|| defined(__alpha__) || defined(__arch64__) \
|| defined(__powerpc64__) || defined(__s390x__) \
|| (defined(__x86_64__) && !defined(__ILP32__))
# define GC_SIZEOF_PTR 8
#else
# define GC_SIZEOF_PTR 4
#endif
/* The return type of GC_get_version(). A 32-bit unsigned integer */
/* or longer. */
# define GC_VERSION_VAL_T unsigned
@ -330,7 +342,7 @@
#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
# define GC_ADD_CALLER
# define GC_RETURN_ADDR (GC_word)__return_address
# define GC_RETURN_ADDR (GC_return_addr_t)__return_address
#endif
#if defined(__linux__) || defined(__GLIBC__)
@ -356,11 +368,8 @@
# define GC_HAVE_BUILTIN_BACKTRACE
#endif
#if defined(GC_HAVE_BUILTIN_BACKTRACE) && !defined(GC_CAN_SAVE_CALL_STACKS)
# define GC_CAN_SAVE_CALL_STACKS
#endif
#if defined(__sparc__)
#if defined(GC_HAVE_BUILTIN_BACKTRACE) && !defined(GC_CAN_SAVE_CALL_STACKS) \
|| defined(__sparc__)
# define GC_CAN_SAVE_CALL_STACKS
#endif
@ -380,19 +389,20 @@
# if GC_GNUC_PREREQ(2, 95)
/* gcc knows how to retrieve return address, but we don't know */
/* how to generate call stacks. */
# define GC_RETURN_ADDR (GC_word)__builtin_return_address(0)
# define GC_RETURN_ADDR (GC_return_addr_t)__builtin_return_address(0)
# if GC_GNUC_PREREQ(4, 0) && (defined(__i386__) || defined(__amd64__) \
|| defined(__x86_64__) /* and probably others... */) \
&& !defined(GC_NO_RETURN_ADDR_PARENT)
# define GC_HAVE_RETURN_ADDR_PARENT
# define GC_RETURN_ADDR_PARENT \
(GC_word)__builtin_extract_return_addr(__builtin_return_address(1))
(GC_return_addr_t)__builtin_extract_return_addr( \
__builtin_return_address(1))
/* Note: a compiler might complain that calling */
/* __builtin_return_address with a nonzero argument is unsafe. */
# endif
# else
/* Just pass 0 for gcc compatibility. */
# define GC_RETURN_ADDR 0
# define GC_RETURN_ADDR ((GC_return_addr_t)0)
# endif
#endif /* !GC_CAN_SAVE_CALL_STACKS */

View file

@ -72,14 +72,12 @@ GC_API GC_ATTR_DEPRECATED void GC_CALL GC_init_gcj_malloc(int /* mp_index */,
/* object if GC_malloc() would. In case of out of memory, GC_oom_fn() */
/* is called and its result is returned. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_gcj_malloc(size_t /* lb */,
void * /* ptr_to_struct_containing_descr */);
GC_gcj_malloc(size_t /* lb */, const void * /* vtable_ptr */);
/* The debug versions allocate such that the specified mark proc */
/* is always invoked. */
/* Similar to GC_gcj_malloc, but add the debug info. This is allocated */
/* with GC_gcj_debug_kind. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_debug_gcj_malloc(size_t /* lb */,
void * /* ptr_to_struct_containing_descr */,
GC_debug_gcj_malloc(size_t /* lb */, const void * /* vtable_ptr */,
GC_EXTRA_PARAMS);
/* Similar to GC_gcj_malloc, but assumes that a pointer to near the */
@ -87,7 +85,7 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
/* is always maintained. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_gcj_malloc_ignore_off_page(size_t /* lb */,
void * /* ptr_to_struct_containing_descr */);
const void * /* vtable_ptr */);
/* The kind numbers of normal and debug gcj objects. */
/* Useful only for debug support, we hope. */
@ -97,7 +95,7 @@ GC_API int GC_gcj_debug_kind;
#ifdef GC_DEBUG
# define GC_GCJ_MALLOC(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) GC_GCJ_MALLOC(s,d)
#else
# define GC_GCJ_MALLOC(s,d) GC_gcj_malloc(s,d)
# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) GC_gcj_malloc_ignore_off_page(s,d)

View file

@ -19,17 +19,17 @@
/* WARNING: */
/* Note that for these routines, it is the client's responsibility to */
/* add the extra byte at the end to deal with one-past-the-end pointers.*/
/* In the standard collector configuration, the collector assumes that */
/* such a byte has been added, and hence does not trace the last word */
/* in the resulting object. */
/* This is not an issue if the collector is compiled with */
/* DONT_ADD_BYTE_AT_END, or if GC_all_interior_pointers is not set. */
/* This interface is most useful for compilers that generate C. */
/* It is also used internally for thread-local allocation. */
/* Manual use is hereby discouraged. */
/* Clients should include atomic_ops.h (or similar) before this header. */
/* There is no debugging version of this allocation API. */
/* add the extra byte at the end to deal with one-past-the-end */
/* pointers. In the standard collector configuration, the collector */
/* assumes that such a byte has been added, and hence does not trace */
/* the last "pointer-sized" word in the resulting object. This is not */
/* an issue if GC_get_all_interior_pointers() returns 0 or */
/* if GC_get_dont_add_byte_at_end() returns 1. */
/* This interface is most useful for compilers that generate C. It is */
/* also used internally for thread-local allocation. A manual use is */
/* hereby discouraged. Clients should include atomic_ops.h (or */
/* similar) before this header. There is no debugging version of this */
/* allocation API. */
#include "gc.h"
#include "gc_tiny_fl.h"
@ -73,11 +73,31 @@
#define GC_I_PTRFREE 0
#define GC_I_NORMAL 1
/* Store a pointer to a list of newly allocated objects of kind k and */
/* size lb in *result. The caller must make sure that *result is */
/* traced even if objects are ptrfree. */
GC_API void GC_CALL GC_generic_malloc_many(size_t /* lb */, int /* k */,
void ** /* result */);
/* Determine if the collector has been configured not to pad the */
/* allocated objects even in the all-interior-pointers mode. */
/* Meaningful only if GC_get_all_interior_pointers() returns 1. */
GC_API int GC_CALL GC_get_dont_add_byte_at_end(void);
/* Return a list of one or more objects of the indicated size, linked */
/* through the first pointer in each object. This has the advantage */
/* that it acquires the allocator lock only once, and may greatly */
/* reduce time wasted contending for the allocator lock. Typical usage */
/* would be in a thread that requires many items of the same size. */
/* It would keep its own free list in a thread-local storage, and call */
/* GC_malloc_many or friends to replenish it. (We do not round up */
/* object sizes, since a call indicates the intention to consume many */
/* objects of exactly this size.) We assume that the size is non-zero */
/* and a multiple of GC_GRANULE_BYTES, and that the size already */
/* includes the value returned by GC_get_all_interior_pointers() */
/* (unless GC_get_dont_add_byte_at_end() returns a non-zero value). */
/* We return the free-list by assigning it to (*result), since it is */
/* not safe to return, e.g. a linked list of pointer-free objects, */
/* since the collector would not retain the entire list if it were */
/* invoked just as we were returning; the client must make sure that */
/* (*result) is traced even if objects are pointer-free. Note also */
/* that the client should usually clear the link field. */
GC_API void GC_CALL GC_generic_malloc_many(size_t /* lb_adjusted */,
int /* k */, void ** /* result */);
/* Generalized version of GC_malloc and GC_malloc_atomic. */
/* Uses appropriately the thread-local (if available) or the global */
@ -93,39 +113,44 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
# define GC_malloc_kind_global GC_malloc_kind
#endif
/* An internal macro to update the free list pointer atomically (if */
/* An internal macro to update the free-list pointer atomically (if */
/* the AO primitives are available) to avoid race with the marker. */
#if defined(GC_THREADS) && defined(AO_HAVE_store)
# define GC_FAST_M_AO_STORE(my_fl, next) \
AO_store((volatile AO_t *)(my_fl), (AO_t)(next))
#else
#if !defined(GC_THREADS) || !defined(AO_HAVE_store)
# define GC_FAST_M_AO_STORE(my_fl, next) (void)(*(my_fl) = (next))
#elif defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ > __SIZEOF_SIZE_T__)
/* Directly use the GCC atomic intrinsic as the size of a pointer is */
/* bigger than that of AO_t. */
# define GC_FAST_M_AO_STORE(my_fl, next) \
__atomic_store_n(my_fl, next, __ATOMIC_RELAXED)
#else
# define GC_FAST_M_AO_STORE(my_fl, next) \
AO_store((volatile AO_t *)(my_fl), (size_t)(next))
#endif
/* The ultimately general inline allocation macro. Allocate an object */
/* of size granules, putting the resulting pointer in result. Tiny_fl */
/* is a "tiny" free list array, which will be used first, if the size */
/* is appropriate. If granules argument is too large, we allocate with */
/* default_expr instead. If we need to refill the free list, we use */
/* GC_generic_malloc_many with the indicated kind. */
/* of size lg (in granules), putting the resulting pointer in result. */
/* Tiny_fl is a "tiny" free-list array, which will be used first, if */
/* the size is appropriate. If lg argument is too large, we allocate */
/* with default_expr instead. If we need to refill the free list, we */
/* use GC_generic_malloc_many with the indicated kind. */
/* Tiny_fl should be an array of GC_TINY_FREELISTS void * pointers. */
/* If num_direct is nonzero, and the individual free list pointers */
/* If num_direct is nonzero, and the individual free-list pointers */
/* are initialized to (void *)1, then we allocate num_direct granules */
/* directly using generic_malloc before putting multiple objects into */
/* the tiny_fl entry. If num_direct is zero, then the free lists may */
/* also be initialized to (void *)0. */
/* also be initialized to NULL. */
/* Note that we use the zeroth free list to hold objects 1 granule in */
/* size that are used to satisfy size 0 allocation requests. */
/* We rely on much of this hopefully getting optimized away in the */
/* case of num_direct is 0. Particularly, if granules argument is */
/* constant, this should generate a small amount of code. */
# define GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, num_direct, \
kind, default_expr, init) \
/* case of num_direct is 0. Particularly, if lg argument is constant, */
/* this should generate a small amount of code. */
#define GC_FAST_MALLOC_GRANS(result, lg, tiny_fl, num_direct, k, \
default_expr, init) \
do { \
if (GC_EXPECT((granules) >= GC_TINY_FREELISTS, 0)) { \
if (GC_EXPECT((lg) >= GC_TINY_FREELISTS, 0)) { \
result = (default_expr); \
} else { \
void **my_fl = (tiny_fl) + (granules); \
void **my_fl = (tiny_fl) + (lg); \
void *my_entry = *my_fl; \
void *next; \
\
@ -137,13 +162,13 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_FAST_M_AO_STORE(my_fl, next); \
init; \
GC_PREFETCH_FOR_WRITE(next); \
if ((kind) != GC_I_PTRFREE) { \
if ((k) != GC_I_PTRFREE) { \
GC_end_stubborn_change(my_fl); \
GC_reachable_here(next); \
} \
GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
GC_ASSERT((kind) == GC_I_PTRFREE \
|| ((GC_word *)result)[1] == 0); \
GC_ASSERT(GC_size(result) >= (lg) * GC_GRANULE_BYTES); \
GC_ASSERT((k) == GC_I_PTRFREE \
|| 0 /* NULL */ == ((void **)result)[1]); \
break; \
} \
/* Entry contains counter or NULL */ \
@ -151,18 +176,17 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
/* (GC_word)my_entry <= (num_direct) */ \
&& my_entry != 0 /* NULL */) { \
/* Small counter value, not NULL */ \
GC_FAST_M_AO_STORE(my_fl, (char *)my_entry \
+ (granules) + 1); \
GC_FAST_M_AO_STORE(my_fl, (char *)my_entry + (lg) + 1); \
result = (default_expr); \
break; \
} else { \
/* Large counter or NULL */ \
GC_generic_malloc_many((granules) == 0 ? GC_GRANULE_BYTES : \
GC_RAW_BYTES_FROM_INDEX(granules), \
kind, my_fl); \
GC_generic_malloc_many(0 == (lg) ? GC_GRANULE_BYTES \
: GC_RAW_BYTES_FROM_INDEX(lg), \
k, my_fl); \
my_entry = *my_fl; \
if (my_entry == 0) { \
result = (*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES); \
result = (*GC_get_oom_fn())((lg) * GC_GRANULE_BYTES); \
break; \
} \
} \
@ -170,32 +194,30 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
} \
} while (0)
# define GC_WORDS_TO_WHOLE_GRANULES(n) \
GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)
/* Allocate n words (not bytes). The pointer is stored to result. */
/* Note: this should really only be used if GC_all_interior_pointers is */
/* not set, or DONT_ADD_BYTE_AT_END is set; see above. */
/* Does not acquire the allocator lock. The caller is responsible for */
/* supplying a cleared tiny_fl free list array. For single-threaded */
/* applications, this may be a global array. */
# define GC_MALLOC_WORDS_KIND(result, n, tiny_fl, kind, init) \
/* Allocate n "pointer-sized" words. The allocation size is */
/* rounded up to a granule size. The pointer is stored to result. */
/* Should not be used unless GC_get_all_interior_pointers() returns 0 */
/* or if GC_get_dont_add_byte_at_end() returns 1. Does not acquire the */
/* allocator lock. The caller is responsible for supplying a cleared */
/* tiny_fl free-list array. For single-threaded applications, this may */
/* be a global array. */
#define GC_MALLOC_WORDS_KIND(result, n, tiny_fl, k, init) \
do { \
size_t granules = GC_WORDS_TO_WHOLE_GRANULES(n); \
GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, 0, kind, \
GC_malloc_kind(granules*GC_GRANULE_BYTES, kind), \
init); \
size_t lg = GC_PTRS_TO_WHOLE_GRANULES(n); \
\
GC_FAST_MALLOC_GRANS(result, lg, tiny_fl, 0 /* num_direct */, k, \
GC_malloc_kind(lg * GC_GRANULE_BYTES, k), init); \
} while (0)
# define GC_MALLOC_WORDS(result, n, tiny_fl) \
#define GC_MALLOC_WORDS(result, n, tiny_fl) \
GC_MALLOC_WORDS_KIND(result, n, tiny_fl, GC_I_NORMAL, \
*(void **)(result) = 0)
(void)(*(void **)(result) = 0 /* NULL */))
# define GC_MALLOC_ATOMIC_WORDS(result, n, tiny_fl) \
#define GC_MALLOC_ATOMIC_WORDS(result, n, tiny_fl) \
GC_MALLOC_WORDS_KIND(result, n, tiny_fl, GC_I_PTRFREE, (void)0)
/* And once more for two word initialized objects: */
# define GC_CONS(result, first, second, tiny_fl) \
/* And one more for two-pointer initialized objects: */
#define GC_CONS(result, first, second, tiny_fl) \
do { \
void *l = (void *)(first); \
void *r = (void *)(second); \
@ -207,11 +229,11 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
} \
} while (0)
/* Print address of each object in the free list. The caller should */
/* hold the allocator lock at least in the reader mode. Defined only */
/* if the library has been compiled without NO_DEBUGGING. */
GC_API void GC_CALL GC_print_free_list(int /* kind */,
size_t /* sz_in_granules */);
/* Print address of each object in the free list for the given kind and */
/* size (in granules). The caller should hold the allocator lock at */
/* least in the reader mode. Defined only if the library has been */
/* compiled without NO_DEBUGGING. */
GC_API void GC_CALL GC_print_free_list(int /* k */, size_t /* lg */);
#ifdef __cplusplus
} /* extern "C" */

View file

@ -11,7 +11,6 @@
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/*
@ -63,14 +62,16 @@
/* to invoke the normal mark procedure instead. */
/* WARNING: Such a mark procedure may be invoked on an unused object */
/* residing on a free list. Such objects are cleared, except for a */
/* free list link field in the first word. Thus mark procedures may */
/* not count on the presence of a type descriptor, and must handle this */
/* case correctly somehow. Also, a mark procedure should be prepared */
/* to be executed concurrently from the marker threads (the later ones */
/* are created only if the client has called GC_start_mark_threads() */
/* or started a user thread previously). */
/* free-list link field (which is located at the beginning of each */
/* object). Thus mark procedures may not count on the presence of a */
/* type descriptor, and must handle this case correctly somehow. Also, */
/* a mark procedure should be prepared to be executed concurrently from */
/* the marker threads (the later ones are created only if the client */
/* has called GC_start_mark_threads() or started a user thread */
/* previously). For the compatibility reason, addr is a pointer to */
/* word, but it should be treated as a pointer to void pointer. */
typedef struct GC_ms_entry * (GC_CALLBACK * GC_mark_proc)(GC_word * /* addr */,
struct GC_ms_entry * /* mark_stack_ptr */,
struct GC_ms_entry * /* mark_stack_top */,
struct GC_ms_entry * /* mark_stack_limit */,
GC_word /* env */);
@ -85,19 +86,19 @@ typedef struct GC_ms_entry * (GC_CALLBACK * GC_mark_proc)(GC_word * /* addr */,
/* Object descriptors on mark stack or in objects. Low order two */
/* bits are tags distinguishing among the following 4 possibilities */
/* for the rest (high order) bits. */
/* for the rest (high-order) bits. */
#define GC_DS_TAG_BITS 2
#define GC_DS_TAGS ((1U << GC_DS_TAG_BITS) - 1)
#define GC_DS_LENGTH 0 /* The entire word is a length in bytes that */
/* must be a multiple of 4. */
#define GC_DS_BITMAP 1 /* The high order bits are describing pointer */
#define GC_DS_BITMAP 1 /* The high-order bits are describing pointer */
/* fields. The most significant bit is set if */
/* the first word is a pointer. */
/* the first "pointer-sized" word is a pointer. */
/* (This unconventional ordering sometimes */
/* makes the marker slightly faster.) */
/* Zeroes indicate definite nonpointers. Ones */
/* Zeroes indicate definite non-pointers; ones */
/* indicate possible pointers. */
/* Only usable if pointers are word aligned. */
/* Only usable if pointers are aligned. */
#define GC_DS_PROC 2
/* The objects referenced by this object can be */
/* pushed on the mark stack by invoking */
@ -107,21 +108,22 @@ typedef struct GC_ms_entry * (GC_CALLBACK * GC_mark_proc)(GC_word * /* addr */,
((((((GC_word)(env)) << GC_LOG_MAX_MARK_PROCS) \
| (unsigned)(proc_index)) << GC_DS_TAG_BITS) \
| (GC_word)GC_DS_PROC)
#define GC_DS_PER_OBJECT 3 /* The real descriptor is at the */
/* byte displacement from the beginning of the */
#define GC_DS_PER_OBJECT 3
/* The real descriptor is at the byte */
/* displacement from the beginning of the */
/* object given by descr & ~GC_DS_TAGS. */
/* If the descriptor is negative, the real */
/* descriptor is at (*<object_start>) - */
/* (descr&~GC_DS_TAGS) - GC_INDIR_PER_OBJ_BIAS */
/* (descr&~GC_DS_TAGS) - GC_INDIR_PER_OBJ_BIAS. */
/* The latter alternative can be used if each */
/* object contains a type descriptor in the */
/* first word. */
/* Note that in the multi-threaded environments */
/* per-object descriptors must be located in */
/* either the first two or last two words of */
/* the object, since only those are guaranteed */
/* to be cleared while the allocator lock is */
/* held. */
/* object contains a type descriptor at the */
/* beginning of the object. Note that in the */
/* multi-threaded environments per-object */
/* descriptors must be located in either the */
/* first two or last two "pointer-sized" words */
/* of the object, since only those are */
/* guaranteed to be cleared while the allocator */
/* lock is held. */
#define GC_INDIR_PER_OBJ_BIAS 0x10
GC_API void * GC_least_plausible_heap_addr;
@ -134,18 +136,18 @@ GC_API void * GC_greatest_plausible_heap_addr;
/* larger than GC_least_plausible_heap_addr and */
/* less than GC_greatest_plausible_heap_addr. */
/* Specify the pointer mask. Works only if the collector is built with */
/* DYNAMIC_POINTER_MASK macro defined. These primitives are normally */
/* needed only to support systems that use high-order pointer tags. */
/* The setter is expected to be called, if needed, before the GC */
/* Specify the pointer address mask. Works only if the collector is */
/* built with DYNAMIC_POINTER_MASK macro defined. These primitives are */
/* normally needed only to support systems that use high-order pointer */
/* tags. The setter is expected to be called, if needed, before the GC */
/* initialization or, at least, before the first object is allocated. */
/* Both the setter and the getter are unsynchronized. */
GC_API void GC_CALL GC_set_pointer_mask(GC_word);
GC_API GC_word GC_CALL GC_get_pointer_mask(void);
/* Similar to GC_set/get_pointer_mask but for the pointer shift. */
/* The value should be less than the size of word, in bits. Applied */
/* after the mask. */
/* Similar to GC_set/get_pointer_mask but for the pointer address */
/* shift. The value should be less than the size of word, in bits. */
/* Applied after the mask. */
GC_API void GC_CALL GC_set_pointer_shift(unsigned);
GC_API unsigned GC_CALL GC_get_pointer_shift(void);
@ -169,14 +171,26 @@ GC_API unsigned GC_CALL GC_get_pointer_shift(void);
/* Note that mark procedures should explicitly call FIXUP_POINTER() */
/* if required. */
GC_API struct GC_ms_entry * GC_CALL GC_mark_and_push(void * /* obj */,
struct GC_ms_entry * /* mark_stack_ptr */,
struct GC_ms_entry * /* mark_stack_top */,
struct GC_ms_entry * /* mark_stack_limit */,
void ** /* src */);
#define GC_MARK_AND_PUSH(obj, msp, lim, src) \
((GC_word)(obj) > (GC_word)GC_least_plausible_heap_addr \
&& (GC_word)(obj) < (GC_word)GC_greatest_plausible_heap_addr ? \
GC_mark_and_push(obj, msp, lim, src) : (msp))
(GC_ADDR_LT((char *)GC_least_plausible_heap_addr, (char *)(obj)) \
&& GC_ADDR_LT((char *)(obj), (char *)GC_greatest_plausible_heap_addr) \
? GC_mark_and_push(obj, msp, lim, src) : (msp))
GC_API void GC_CALL GC_push_proc(GC_word /* descr */, void * /* obj */);
GC_API struct GC_ms_entry * GC_CALL GC_custom_push_proc(GC_word /* descr */,
void * /* obj */,
struct GC_ms_entry * /* mark_stack_top */,
struct GC_ms_entry * /* mark_stack_limit */);
GC_API struct GC_ms_entry * GC_CALL GC_custom_push_range(void * /* bottom */,
void * /* top */,
struct GC_ms_entry * /* mark_stack_top */,
struct GC_ms_entry * /* mark_stack_limit */);
/* The size of the header added to objects allocated through the */
/* GC_debug routines. Defined as a function so that client mark */
@ -200,26 +214,26 @@ GC_API GC_ATTR_DEPRECATED
/* size and kind of object. */
GC_API GC_ATTR_CONST size_t GC_CALL GC_get_hblk_size(void);
typedef void (GC_CALLBACK * GC_walk_hblk_fn)(struct GC_hblk_s *,
void * /* client_data */);
/* Apply fn to each allocated heap block. It is the responsibility */
/* of the caller to avoid data race during the function execution (e.g. */
/* by acquiring the allocator lock at least in the reader mode). */
GC_API void GC_CALL GC_apply_to_all_blocks(GC_walk_hblk_fn,
void * /* client_data */) GC_ATTR_NONNULL(1);
/* Same as GC_walk_hblk_fn but with index of the free list. */
typedef void (GC_CALLBACK * GC_walk_free_blk_fn)(struct GC_hblk_s *,
int /* index */,
GC_word /* client_data */);
void * /* client_data */);
/* Apply fn to each completely empty heap block. It is the */
/* responsibility of the caller to avoid data race during the function */
/* execution (e.g. by acquiring the allocator lock at least in the */
/* reader mode). */
GC_API void GC_CALL GC_iterate_free_hblks(GC_walk_free_blk_fn,
GC_word /* client_data */) GC_ATTR_NONNULL(1);
typedef void (GC_CALLBACK * GC_walk_hblk_fn)(struct GC_hblk_s *,
GC_word /* client_data */);
/* Apply fn to each allocated heap block. It is the responsibility */
/* of the caller to avoid data race during the function execution (e.g. */
/* by acquiring the allocator lock at least in the reader mode). */
GC_API void GC_CALL GC_apply_to_all_blocks(GC_walk_hblk_fn,
GC_word /* client_data */) GC_ATTR_NONNULL(1);
void * /* client_data */) GC_ATTR_NONNULL(1);
/* If there are likely to be false references to a block starting at h */
/* of the indicated length, then return the next plausible starting */
@ -227,7 +241,7 @@ GC_API void GC_CALL GC_apply_to_all_blocks(GC_walk_hblk_fn,
/* NULL is returned. Assumes the allocator lock is held at least in */
/* the reader mode but no assertion about it by design. */
GC_API struct GC_hblk_s *GC_CALL GC_is_black_listed(struct GC_hblk_s *,
GC_word /* len */);
size_t /* len */);
/* Return the number of set mark bits for the heap block where object */
/* p is located. Defined only if the library has been compiled */
@ -238,7 +252,7 @@ GC_API unsigned GC_CALL GC_count_set_marks_in_hblk(const void * /* p */);
/* custom mark procedures, by language runtimes. */
/* The _inner versions assume the caller holds the allocator lock. */
/* Return a new free list array. */
/* Return a new free-list array. */
GC_API void ** GC_CALL GC_new_free_list(void);
GC_API void ** GC_CALL GC_new_free_list_inner(void);
@ -310,7 +324,7 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
#endif /* !GC_DEBUG */
/* Similar to GC_size but returns object kind. Size is returned too */
/* if psize is not NULL. */
/* if psize is not NULL. The object pointer should not be NULL. */
GC_API int GC_CALL GC_get_kind_and_size(const void *, size_t * /* psize */)
GC_ATTR_NONNULL(1);
@ -405,7 +419,7 @@ GC_API void GC_CALL GC_print_trace_inner(GC_word /* gc_no */);
/* setter and the getter acquire the allocator lock (in the reader mode */
/* in case of the getter). */
typedef struct GC_ms_entry * (GC_CALLBACK * GC_on_mark_stack_empty_proc)(
struct GC_ms_entry * /* mark_stack_ptr */,
struct GC_ms_entry * /* mark_stack_top */,
struct GC_ms_entry * /* mark_stack_limit */);
GC_API void GC_CALL GC_set_on_mark_stack_empty(GC_on_mark_stack_empty_proc);
GC_API GC_on_mark_stack_empty_proc GC_CALL GC_get_on_mark_stack_empty(void);

View file

@ -32,41 +32,40 @@
/* overhead and space usage for mark bits (usually mark bytes). */
/* On many 64-bit architectures some memory references require 16-byte */
/* alignment, making this necessary anyway. For a few 32-bit */
/* architectures (e.g. x86), we may also need 16-byte alignment for */
/* architectures (e.g. i686), we may also need 16-byte alignment for */
/* certain memory references. But currently that does not seem to be */
/* the default for all conventional malloc implementations, so we */
/* ignore that problem. */
/* It would always be safe, and often useful, to be able to allocate */
/* very small objects with smaller alignment. But that would cost us */
/* mark bit space, so we no longer do so. */
/* GC_GRANULE_BYTES should not be overridden in any instances of the GC */
/* library that may be shared between applications, since it affects */
/* the binary interface to the library. */
#ifndef GC_GRANULE_BYTES
# define GC_GRANULE_WORDS 2
# if defined(__LP64__) || defined (_LP64) || defined(_WIN64) \
|| defined(__alpha__) || defined(__arch64__) \
|| defined(__powerpc64__) || defined(__s390x__) \
|| (defined(__x86_64__) && !defined(__ILP32__))
# define GC_GRANULE_BYTES (GC_GRANULE_WORDS * 8 /* sizeof(void*) */)
# else
# define GC_GRANULE_BYTES (GC_GRANULE_WORDS * 4 /* sizeof(void*) */)
# endif
#if defined(CPPCHECK) && GC_GRANULE_BYTES == 1
# undef GC_GRANULE_BYTES
#endif
#ifdef GC_GRANULE_BYTES
# define GC_GRANULE_PTRS (GC_GRANULE_BYTES / GC_SIZEOF_PTR)
#else
# define GC_GRANULE_PTRS 2 /* in pointers */
# define GC_GRANULE_BYTES (GC_GRANULE_PTRS * GC_SIZEOF_PTR)
#endif /* !GC_GRANULE_BYTES */
#if GC_GRANULE_WORDS == 2
# define GC_WORDS_TO_GRANULES(n) ((n)>>1)
#else
# define GC_WORDS_TO_GRANULES(n) ((n)*sizeof(void *)/GC_GRANULE_BYTES)
#endif
/* Convert size in pointers to that in granules. */
#define GC_PTRS_TO_GRANULES(n) ((n) / GC_GRANULE_PTRS)
/* A "tiny" free list header contains GC_TINY_FREELISTS pointers to */
/* Convert size in pointers to that in granules, but rounding up the */
/* result. */
#define GC_PTRS_TO_WHOLE_GRANULES(n) \
GC_PTRS_TO_GRANULES((n) + GC_GRANULE_PTRS - 1)
/* A "tiny" free-list header contains GC_TINY_FREELISTS pointers to */
/* singly linked lists of objects of different sizes, the i-th one */
/* containing objects i granules in size. Note that there is a list */
/* of size zero objects. */
#ifndef GC_TINY_FREELISTS
# if GC_GRANULE_BYTES == 16
# if GC_GRANULE_BYTES >= 16
# define GC_TINY_FREELISTS 25
# else
# define GC_TINY_FREELISTS 33 /* Up to and including 256 bytes */
@ -75,12 +74,23 @@
/* The i-th free list corresponds to size i*GC_GRANULE_BYTES */
/* Internally to the collector, the index can be computed with */
/* ALLOC_REQUEST_GRANS(). Externally, we don't know whether */
/* DONT_ADD_BYTE_AT_END is set, but the client should know. */
/* ALLOC_REQUEST_GRANS(). The later also depends on the */
/* values returned by GC_get_dont_add_byte_at_end() and */
/* GC_get_all_interior_pointers(). */
/* Convert a free list index to the actual size of objects */
/* Convert a free-list index to the actual size of objects */
/* on that list, including extra space we added. Not an */
/* inverse of the above. */
#define GC_RAW_BYTES_FROM_INDEX(i) ((i) * GC_GRANULE_BYTES)
/* Deprecated. Use GC_GRANULE_PTRS instead. */
#undef GC_GRANULE_WORDS
#define GC_GRANULE_WORDS GC_GRANULE_PTRS
/* Deprecated. Use GC_PTRS_TO_GRANULES() instead. */
#define GC_WORDS_TO_GRANULES(n) GC_PTRS_TO_GRANULES(n)
/* Deprecated. */
#define GC_WORDS_TO_WHOLE_GRANULES(n) GC_PTRS_TO_WHOLE_GRANULES(n)
#endif /* GC_TINY_FL_H */

View file

@ -34,19 +34,32 @@
extern "C" {
#endif
typedef GC_word * GC_bitmap;
/* The least significant bit of the first word is one if */
/* the first word in the object may be a pointer. */
/* The size of word (not a pointer) in bits. */
#define GC_WORDSZ (8 * sizeof(GC_word))
#define GC_get_bit(bm, index) /* index should be of unsigned type */ \
(((bm)[(index) / GC_WORDSZ] >> ((index) % GC_WORDSZ)) & 1)
#define GC_set_bit(bm, index) /* index should be of unsigned type */ \
((bm)[(index) / GC_WORDSZ] |= (GC_word)1 << ((index) % GC_WORDSZ))
#define GC_WORD_OFFSET(t, f) (offsetof(t,f) / sizeof(GC_word))
/* The size of a type in words. */
#define GC_WORD_LEN(t) (sizeof(t) / sizeof(GC_word))
/* The offset of a field in words. */
#define GC_WORD_OFFSET(t, f) (offsetof(t, f) / sizeof(GC_word))
/* The bitmap type. The least significant bit of the first word is one */
/* if the first "pointer-sized" word in the object may be a pointer. */
typedef GC_word * GC_bitmap;
/* The number of elements (words) of a bitmap array to create for */
/* a given type. The bitmap is intended to be passed to */
/* GC_make_descriptor(). */
#define GC_BITMAP_SIZE(t) ((GC_WORD_LEN(t) + GC_WORDSZ - 1) / GC_WORDSZ)
/* The setter and getter of the bitmap. The bm argument should be of */
/* GC_bitmap type; index argument should be of some unsigned type and */
/* should not have side effects. */
#define GC_set_bit(bm, index) \
((bm)[(index) / GC_WORDSZ] |= (GC_word)1 << ((index) % GC_WORDSZ))
#define GC_get_bit(bm, index) \
(((bm)[(index) / GC_WORDSZ] >> ((index) % GC_WORDSZ)) & 1)
typedef GC_word GC_descr;
GC_API GC_descr GC_CALL GC_make_descriptor(const GC_word * /* GC_bitmap bm */,
@ -68,7 +81,7 @@ GC_API GC_descr GC_CALL GC_make_descriptor(const GC_word * /* GC_bitmap bm */,
/* per allocation. */
/* It is possible to generate a descriptor for a C type T with */
/* word aligned pointer fields f1, f2, ... as follows: */
/* word-aligned pointer fields f1, f2, ... as follows: */
/* */
/* GC_descr T_descr; */
/* GC_word T_bitmap[GC_BITMAP_SIZE(T)] = {0}; */
@ -82,8 +95,8 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_descr /* d */);
/* Allocate an object whose layout is described by d. */
/* The size may NOT be less than the number of */
/* meaningful bits in the bitmap of d multiplied by */
/* sizeof GC_word. The returned object is cleared. */
/* meaningful bits in the bitmap of d multiplied by the */
/* size of a pointer. The returned object is cleared. */
/* The returned object may NOT be passed to GC_realloc. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
@ -101,16 +114,18 @@ GC_API GC_ATTR_MALLOC GC_ATTR_CALLOC_SIZE(1, 2) void * GC_CALL
/* machine with 16-bit aligned pointers, size_in_bytes */
/* must be a multiple of 2. The element size may NOT */
/* be less than the number of meaningful bits in the */
/* bitmap of d multiplied by sizeof GC_word. */
/* bitmap of d multiplied by the size of a pointer. */
/* Returned object is cleared. */
#define GC_CALLOC_TYPED_DESCR_WORDS 8
#define GC_CALLOC_TYPED_DESCR_PTRS 1
#define GC_CALLOC_TYPED_DESCR_WORDS 8 /* includes space for pointers */
#ifdef GC_BUILD
struct GC_calloc_typed_descr_s;
#else
struct GC_calloc_typed_descr_s {
GC_word opaque[GC_CALLOC_TYPED_DESCR_WORDS];
GC_uintptr_t opaque_p[GC_CALLOC_TYPED_DESCR_PTRS];
GC_word opaque[GC_CALLOC_TYPED_DESCR_WORDS - GC_CALLOC_TYPED_DESCR_PTRS];
};
#endif

View file

@ -78,6 +78,13 @@
# define pvalloc(n) GC_pvalloc(n) /* obsolete */
#endif /* !GC_NO_VALLOC */
#undef malloc_usable_size /* available in glibc */
#define malloc_usable_size(p) GC_size(p)
#undef malloc_size /* available on Darwin */
#define malloc_size(p) GC_size(p)
#undef _msize /* available in Windows CRT */
#define _msize(p) GC_size(p)
#ifndef CHECK_LEAKS
# define CHECK_LEAKS() GC_gcollect()
/* Note 1: CHECK_LEAKS does not have GC prefix (preserved for */