Initial revision

Akinori Ito
2001-11-08 05:14:08 +00:00
commit 68a07bf03b
305 changed files with 104639 additions and 0 deletions

56
gc/include/backptr.h Normal file

@@ -0,0 +1,56 @@
/*
* This is a simple API to implement pointer back tracing, i.e.
* to answer questions such as "who is pointing to this" or
* "why is this object being retained by the collector"
*
* This API assumes that we have an ANSI C compiler.
*
* Most of these calls yield useful information only after
* a garbage collection. Usually the client will first force
* a full collection and then gather information, preferably
* before much intervening allocation.
*
* The implementation of the interface is only about 99.9999%
* correct. It is intended to be good enough for profiling,
* but is not intended to be used with production code.
*
* Results are likely to be much more useful if all allocation is
* accomplished through the debugging allocators.
*
* The implementation idea is due to A. Demers.
*/
/* Store information about the object referencing dest in *base_p */
/* and *offset_p. */
/* If multiple objects or roots point to dest, the one reported */
/* will be the last one used by the garbage collector to trace the */
/* object. */
/* source is root ==> *base_p = address, *offset_p = 0 */
/* source is heap object ==> *base_p != 0, *offset_p = offset */
/* Returns 1 on success, 0 if source couldn't be determined. */
/* Dest can be any address within a heap object. */
typedef enum { GC_UNREFERENCED, /* No reference info available. */
GC_NO_SPACE, /* Dest not allocated with debug alloc */
GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
GC_FINALIZER_REFD /* Finalizable and hence accessible. */
} GC_ref_kind;
GC_ref_kind GC_get_back_ptr_info(void *dest, void **base_p, size_t *offset_p);
/* Generate a random heap address. */
/* The resulting address is in the heap, but */
/* not necessarily inside a valid object. */
void * GC_generate_random_heap_address(void);
/* Generate a random address inside a valid marked heap object. */
void * GC_generate_random_valid_address(void);
/* Force a garbage collection and generate a backtrace from a */
/* random heap address. */
/* This uses the GC logging mechanism (GC_printf) to produce */
/* output. It can often be called from a debugger. The */
/* source in dbg_mlc.c also serves as a sample client. */
void GC_generate_random_backtrace(void);
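
A minimal usage sketch for the interface above (illustrative only; it assumes gc.h and backptr.h are on the include path and that the program is linked against a collector configured to retain back-pointer information):

#include <stdio.h>
#include "gc.h"
#include "backptr.h"

/* Print whatever reference information the collector recorded for dest. */
/* The information is only meaningful right after a full collection, so  */
/* force one first.                                                      */
static void report_referencer(void *dest)
{
    void *base;
    size_t offset;

    GC_gcollect();
    switch (GC_get_back_ptr_info(dest, &base, &offset)) {
      case GC_UNREFERENCED:
        printf("%p: no reference info available\n", dest);
        break;
      case GC_NO_SPACE:
        printf("%p: not allocated with the debugging allocator\n", dest);
        break;
      case GC_REFD_FROM_ROOT:
        printf("%p: referenced directly from root %p\n", dest, base);
        break;
      case GC_REFD_FROM_HEAP:
        printf("%p: referenced from heap object %p at offset %lu\n",
               dest, base, (unsigned long)offset);
        break;
      case GC_FINALIZER_REFD:
        printf("%p: finalizable, hence still accessible\n", dest);
        break;
    }
}

int main(void)
{
    void *p = GC_malloc(100);        /* give the collector something to trace */
    report_referencer(p);
    GC_generate_random_backtrace();  /* prints a trace via the GC logging mechanism */
    return 0;
}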

327
gc/include/cord.h Normal file

@@ -0,0 +1,327 @@
/*
* Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
* Author: Hans-J. Boehm (boehm@parc.xerox.com)
*/
/* Boehm, October 5, 1995 4:20 pm PDT */
/*
* Cords are immutable character strings. A number of operations
* on long cords are much more efficient than their strings.h counterparts.
* In particular, concatenation takes constant time independent of the length
* of the arguments. (Cords are represented as trees, with internal
* nodes representing concatenation and leaves consisting of either C
* strings or a functional description of the string.)
*
* The following are reasonable applications of cords. They would perform
* unacceptably if C strings were used:
* - A compiler that produces assembly language output by repeatedly
* concatenating instructions onto a cord representing the output file.
* - A text editor that converts the input file to a cord, and then
* performs editing operations by producing a new cord representing
* the file after each character change (and keeping the old ones in an
* edit history)
*
* For optimal performance, cords should be built by
* concatenating short sections.
* This interface is designed for maximum compatibility with C strings.
* ASCII NUL characters may be embedded in cords using CORD_from_fn.
* This is handled correctly, but CORD_to_char_star will produce a string
* with embedded NULs when given such a cord.
*
* This interface is fairly big, largely for performance reasons.
* The most basic constants and functions:
*
* CORD - the type of a cord;
* CORD_EMPTY - empty cord;
* CORD_len(cord) - length of a cord;
* CORD_cat(cord1,cord2) - concatenation of two cords;
* CORD_substr(cord, start, len) - substring (or subcord);
* CORD_pos i; CORD_FOR(i, cord) { ... CORD_pos_fetch(i) ... } -
* examine each character in a cord. CORD_pos_fetch(i) is the char.
* CORD_fetch(int i) - Retrieve i'th character (slowly).
* CORD_cmp(cord1, cord2) - compare two cords.
* CORD_from_file(FILE * f) - turn a read-only file into a cord.
* CORD_to_char_star(cord) - convert to C string.
* (Non-NULL C constant strings are cords.)
* CORD_printf (etc.) - cord version of printf. Use %r for cords.
*/
# ifndef CORD_H
# define CORD_H
# include <stddef.h>
# include <stdio.h>
/* Cords have type const char *. This is cheating quite a bit, and not */
/* 100% portable. But it means that nonempty character string */
/* constants may be used as cords directly, provided the string is */
/* never modified in place. The empty cord is represented by, and */
/* can be written as, 0. */
typedef const char * CORD;
/* An empty cord is always represented as nil */
# define CORD_EMPTY 0
/* Is a nonempty cord represented as a C string? */
#define CORD_IS_STRING(s) (*(s) != '\0')
/* Concatenate two cords. If the arguments are C strings, they may */
/* not be subsequently altered. */
CORD CORD_cat(CORD x, CORD y);
/* Concatenate a cord and a C string with known length. Except for the */
/* empty string case, this is a special case of CORD_cat. Since the */
/* length is known, it can be faster. */
/* The string y is shared with the resulting CORD. Hence it should */
/* not be altered by the caller. */
CORD CORD_cat_char_star(CORD x, const char * y, size_t leny);
/* Compute the length of a cord */
size_t CORD_len(CORD x);
/* Cords may be represented by functions defining the ith character */
typedef char (* CORD_fn)(size_t i, void * client_data);
/* Turn a functional description into a cord. */
CORD CORD_from_fn(CORD_fn fn, void * client_data, size_t len);
/* Return the substring (subcord really) of x with length at most n, */
/* starting at position i. (The initial character has position 0.) */
CORD CORD_substr(CORD x, size_t i, size_t n);
/* Return the argument, but rebalanced to allow more efficient */
/* character retrieval, substring operations, and comparisons. */
/* This is useful only for cords that were built using repeated */
/* concatenation. Guarantees log time access to the result, unless */
/* x was obtained through a large number of repeated substring ops */
/* or the embedded functional descriptions take longer to evaluate. */
/* May reallocate significant parts of the cord. The argument is not */
/* modified; only the result is balanced. */
CORD CORD_balance(CORD x);
/* The following traverse a cord by applying a function to each */
/* character. This is occasionally appropriate, especially where */
/* speed is crucial. But, since C doesn't have nested functions, */
/* clients of this sort of traversal are clumsy to write. Consider */
/* the functions that operate on cord positions instead. */
/* Function to iteratively apply to individual characters in cord. */
typedef int (* CORD_iter_fn)(char c, void * client_data);
/* Function to apply to substrings of a cord. Each substring is a */
/* C character string, not a general cord. */
typedef int (* CORD_batched_iter_fn)(const char * s, void * client_data);
# define CORD_NO_FN ((CORD_batched_iter_fn)0)
/* Apply f1 to each character in the cord, in ascending order, */
/* starting at position i. If */
/* f2 is not CORD_NO_FN, then multiple calls to f1 may be replaced by */
/* a single call to f2. The parameter f2 is provided only to allow */
/* some optimization by the client. This terminates when the right */
/* end of this string is reached, or when f1 or f2 return != 0. In the */
/* latter case CORD_iter returns != 0. Otherwise it returns 0. */
/* The specified value of i must be < CORD_len(x). */
int CORD_iter5(CORD x, size_t i, CORD_iter_fn f1,
CORD_batched_iter_fn f2, void * client_data);
/* A simpler version that starts at 0, and without f2: */
int CORD_iter(CORD x, CORD_iter_fn f1, void * client_data);
# define CORD_iter(x, f1, cd) CORD_iter5(x, 0, f1, CORD_NO_FN, cd)
/* Similar to CORD_iter5, but end-to-beginning. No provisions for */
/* CORD_batched_iter_fn. */
int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data);
/* A simpler version that starts at the end: */
int CORD_riter(CORD x, CORD_iter_fn f1, void * client_data);
/* Functions that operate on cord positions. The easy way to traverse */
/* cords. A cord position is logically a pair consisting of a cord */
/* and an index into that cord. But it is much faster to retrieve a */
/* character based on a position than on an index. Unfortunately, */
/* positions are big (order of a few 100 bytes), so allocate them with */
/* caution. */
/* Things in cord_pos.h should be treated as opaque, except as */
/* described below. Also note that */
/* CORD_pos_fetch, CORD_next and CORD_prev have both macro and function */
/* definitions. The former may evaluate their argument more than once. */
# include "private/cord_pos.h"
/*
Visible definitions from above:
typedef <OPAQUE but fairly big> CORD_pos[1];
* Extract the cord from a position:
CORD CORD_pos_to_cord(CORD_pos p);
* Extract the current index from a position:
size_t CORD_pos_to_index(CORD_pos p);
* Fetch the character located at the given position:
char CORD_pos_fetch(CORD_pos p);
* Initialize the position to refer to the given cord and index.
* Note that this is the most expensive function on positions:
void CORD_set_pos(CORD_pos p, CORD x, size_t i);
* Advance the position to the next character.
* P must be initialized and valid.
* Invalidates p if past end:
void CORD_next(CORD_pos p);
* Move the position to the preceding character.
* P must be initialized and valid.
* Invalidates p if past beginning:
void CORD_prev(CORD_pos p);
* Is the position valid, i.e. inside the cord?
int CORD_pos_valid(CORD_pos p);
*/
# define CORD_FOR(pos, cord) \
for (CORD_set_pos(pos, cord, 0); CORD_pos_valid(pos); CORD_next(pos))
/* An out of memory handler to call. May be supplied by client. */
/* Must not return. */
extern void (* CORD_oom_fn)(void);
/* Dump the representation of x to stdout in an implementation defined */
/* manner. Intended for debugging only. */
void CORD_dump(CORD x);
/* The following could easily be implemented by the client. They are */
/* provided in cordxtra.c for convenience. */
/* Concatenate a character to the end of a cord. */
CORD CORD_cat_char(CORD x, char c);
/* Concatenate n cords. */
CORD CORD_catn(int n, /* CORD */ ...);
/* Return the character in CORD_substr(x, i, 1) */
char CORD_fetch(CORD x, size_t i);
/* Return < 0, 0, or > 0, depending on whether x < y, x = y, x > y */
int CORD_cmp(CORD x, CORD y);
/* A generalization that takes both starting positions for the */
/* comparison, and a limit on the number of characters to be compared. */
int CORD_ncmp(CORD x, size_t x_start, CORD y, size_t y_start, size_t len);
/* Find the first occurrence of s in x at position start or later. */
/* Return the position of the first character of s in x, or */
/* CORD_NOT_FOUND if there is none. */
size_t CORD_str(CORD x, size_t start, CORD s);
/* Return a cord consisting of i copies of (possibly NUL) c. Dangerous */
/* in conjunction with CORD_to_char_star. */
/* The resulting representation takes constant space, independent of i. */
CORD CORD_chars(char c, size_t i);
# define CORD_nul(i) CORD_chars('\0', (i))
/* Turn a file into cord. The file must be seekable. Its contents */
/* must remain constant. The file may be accessed as an immediate */
/* result of this call and/or as a result of subsequent accesses to */
/* the cord. Short files are likely to be immediately read, but */
/* long files are likely to be read on demand, possibly relying on */
/* stdio for buffering. */
/* We must have exclusive access to the descriptor f, i.e. we may */
/* read it at any time, and expect the file pointer to be */
/* where we left it. Normally this should be invoked as */
/* CORD_from_file(fopen(...)) */
/* CORD_from_file arranges to close the file descriptor when it is no */
/* longer needed (e.g. when the result becomes inaccessible). */
/* The file f must be such that ftell reflects the actual character */
/* position in the file, i.e. the number of characters that can be */
/* or were read with fread. On UNIX systems this is always true. On */
/* MS Windows systems, f must be opened in binary mode. */
CORD CORD_from_file(FILE * f);
/* Equivalent to the above, except that the entire file will be read */
/* and the file pointer will be closed immediately. */
/* The binary mode restriction from above does not apply. */
CORD CORD_from_file_eager(FILE * f);
/* Equivalent to the above, except that the file will be read on demand.*/
/* The binary mode restriction applies. */
CORD CORD_from_file_lazy(FILE * f);
/* Turn a cord into a C string. The result shares no structure with */
/* x, and is thus modifiable. */
char * CORD_to_char_star(CORD x);
/* Turn a C string into a CORD. The C string is copied, and so may */
/* subsequently be modified. */
CORD CORD_from_char_star(const char *s);
/* Identical to the above, but the result may share structure with */
/* the argument and is thus not modifiable. */
const char * CORD_to_const_char_star(CORD x);
/* Write a cord to a file, starting at the current position. No */
/* trailing NULs or newlines are added. */
/* Returns EOF if a write error occurs, 1 otherwise. */
int CORD_put(CORD x, FILE * f);
/* "Not found" result for the following two functions. */
# define CORD_NOT_FOUND ((size_t)(-1))
/* A vague analog of strchr. Returns the position (an integer, not */
/* a pointer) of the first occurrence of (char) c inside x at position */
/* i or later. The value i must be < CORD_len(x). */
size_t CORD_chr(CORD x, size_t i, int c);
/* A vague analog of strrchr. Returns index of the last occurrence */
/* of (char) c inside x at position i or earlier. The value i */
/* must be < CORD_len(x). */
size_t CORD_rchr(CORD x, size_t i, int c);
/* The following are also not primitive, but are implemented in */
/* cordprnt.c. They provide functionality similar to the ANSI C */
/* functions with corresponding names, but with the following */
/* additions and changes: */
/* 1. A %r conversion specification specifies a CORD argument. Field */
/* width, precision, etc. have the same semantics as for %s. */
/* (Note that %c,%C, and %S were already taken.) */
/* 2. The format string is represented as a CORD. */
/* 3. CORD_sprintf and CORD_vsprintf assign the result through the 1st */
/* argument. Unlike their ANSI C versions, there is no need to guess */
/* the correct buffer size. */
/* 4. Most of the conversions are implemented through the native */
/* vsprintf. Hence they are usually no faster, and */
/* idiosyncrasies of the native printf are preserved. However, */
/* CORD arguments to CORD_sprintf and CORD_vsprintf are NOT copied; */
/* the result shares the original structure. This may make them */
/* very efficient in some unusual applications. */
/* The format string is copied. */
/* All functions return the number of characters generated or -1 on */
/* error. This complies with the ANSI standard, but is inconsistent */
/* with some older implementations of sprintf. */
/* The implementation of these is probably less portable than the rest */
/* of this package. */
#ifndef CORD_NO_IO
#include <stdarg.h>
int CORD_sprintf(CORD * out, CORD format, ...);
int CORD_vsprintf(CORD * out, CORD format, va_list args);
int CORD_fprintf(FILE * f, CORD format, ...);
int CORD_vfprintf(FILE * f, CORD format, va_list args);
int CORD_printf(CORD format, ...);
int CORD_vprintf(CORD format, va_list args);
#endif /* CORD_NO_IO */
# endif /* CORD_H */
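
A small hypothetical client of the basic operations listed above (it assumes cord.h is on the include path and that the program is linked with the cord and collector libraries):

#include <stdio.h>
#include "cord.h"

int main(void)
{
    CORD greeting = CORD_from_char_star("hello");  /* copied, so the source may change */
    CORD line = CORD_cat(greeting, ", world");     /* constant-time concatenation */
    CORD word = CORD_substr(line, 7, 5);           /* the subcord "world" */
    CORD_pos p;

    CORD_printf("line = %r, length = %lu\n", line, (unsigned long)CORD_len(line));

    /* Traverse the subcord one character at a time using a cord position. */
    CORD_FOR(p, word) {
        putchar(CORD_pos_fetch(p));
    }
    putchar('\n');

    /* Convert to a modifiable, NUL-terminated C string when one is needed. */
    printf("%s\n", CORD_to_char_star(word));
    return 0;
}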

70
gc/include/ec.h Normal file

@@ -0,0 +1,70 @@
# ifndef EC_H
# define EC_H
# ifndef CORD_H
# include "cord.h"
# endif
/* Extensible cords are strings that may be destructively appended to. */
/* They allow fast construction of cords from characters that are */
/* being read from a stream. */
/*
* A client might look like:
*
* {
* CORD_ec x;
* CORD result;
* char c;
* FILE *f;
*
* ...
* CORD_ec_init(x);
* while(...) {
* c = getc(f);
* ...
* CORD_ec_append(x, c);
* }
* result = CORD_balance(CORD_ec_to_cord(x));
*
* If a C string is desired as the final result, the call to CORD_balance
* may be replaced by a call to CORD_to_char_star.
*/
# ifndef CORD_BUFSZ
# define CORD_BUFSZ 128
# endif
typedef struct CORD_ec_struct {
CORD ec_cord;
char * ec_bufptr;
char ec_buf[CORD_BUFSZ+1];
} CORD_ec[1];
/* This structure represents the concatenation of ec_cord with */
/* ec_buf[0 ... (ec_bufptr-ec_buf-1)] */
/* Flush the buffer part of the extensible cord into ec_cord. */
/* Note that this is almost the only real function, and it is */
/* implemented in 6 lines in cordxtra.c */
void CORD_ec_flush_buf(CORD_ec x);
/* Convert an extensible cord to a cord. */
# define CORD_ec_to_cord(x) (CORD_ec_flush_buf(x), (x)[0].ec_cord)
/* Initialize an extensible cord. */
# define CORD_ec_init(x) ((x)[0].ec_cord = 0, (x)[0].ec_bufptr = (x)[0].ec_buf)
/* Append a character to an extensible cord. */
# define CORD_ec_append(x, c) \
{ \
if ((x)[0].ec_bufptr == (x)[0].ec_buf + CORD_BUFSZ) { \
CORD_ec_flush_buf(x); \
} \
*((x)[0].ec_bufptr)++ = (c); \
}
/* Append a cord to an extensible cord. Structure remains shared with */
/* original. */
void CORD_ec_append_cord(CORD_ec x, CORD s);
# endif /* EC_H */
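
The client outline in the comment above, filled in as a complete routine (a sketch only; it assumes cord.h and ec.h are on the include path):

#include <stdio.h>
#include "cord.h"
#include "ec.h"

/* Read an entire stream into a cord, one character at a time, and return */
/* the result balanced for fast subsequent access.                        */
CORD read_stream_to_cord(FILE *f)
{
    CORD_ec buf;
    int c;

    CORD_ec_init(buf);
    while ((c = getc(f)) != EOF) {
        CORD_ec_append(buf, (char)c);
    }
    return CORD_balance(CORD_ec_to_cord(buf));
}

As noted in the header comment, CORD_to_char_star could replace CORD_balance here if a C string is wanted as the final result.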

754
gc/include/gc.h Normal file

@@ -0,0 +1,754 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright 1996 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/*
* Note that this defines a large number of tuning hooks, which can
* safely be ignored in nearly all cases. For normal use it suffices
* to call only GC_MALLOC and perhaps GC_REALLOC.
* For better performance, also look at GC_MALLOC_ATOMIC, and
* GC_enable_incremental. If you need an action to be performed
* immediately before an object is collected, look at GC_register_finalizer.
* If you are using Solaris threads, look at the end of this file.
* Everything else is best ignored unless you encounter performance
* problems.
*/
#ifndef _GC_H
# define _GC_H
# define __GC
# include <stddef.h>
#if defined(__CYGWIN32__) && defined(GC_USE_DLL)
#include "libgc_globals.h"
#endif
#if defined(_MSC_VER) && defined(_DLL)
# ifdef GC_BUILD
# define GC_API __declspec(dllexport)
# else
# define GC_API __declspec(dllimport)
# endif
#endif
#if defined(__WATCOMC__) && defined(GC_DLL)
# ifdef GC_BUILD
# define GC_API extern __declspec(dllexport)
# else
# define GC_API extern __declspec(dllimport)
# endif
#endif
#ifndef GC_API
#define GC_API extern
#endif
# if defined(__STDC__) || defined(__cplusplus)
# define GC_PROTO(args) args
typedef void * GC_PTR;
# else
# define GC_PROTO(args) ()
typedef char * GC_PTR;
# endif
# ifdef __cplusplus
extern "C" {
# endif
/* Define word and signed_word to be unsigned and signed types of the */
/* same size as char * or void *. There seems to be no way to do this */
/* even semi-portably. The following is probably no better/worse */
/* than almost anything else. */
/* The ANSI standard suggests that size_t and ptrdiff_t might be */
/* better choices. But those appear to have incorrect definitions */
/* on many systems. Notably "typedef int size_t" seems to be both */
/* frequent and WRONG. */
typedef unsigned long GC_word;
typedef long GC_signed_word;
/* Public read-only variables */
GC_API GC_word GC_gc_no;/* Counter incremented per collection. */
/* Includes empty GCs at startup. */
/* Public R/W variables */
GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
/* When there is insufficient memory to satisfy */
/* an allocation request, we return */
/* (*GC_oom_fn)(). By default this just */
/* returns 0. */
/* If it returns, it must return 0 or a valid */
/* pointer to a previously allocated heap */
/* object. */
GC_API int GC_find_leak;
/* Do not actually garbage collect, but simply */
/* report inaccessible memory that was not */
/* deallocated with GC_free. Initial value */
/* is determined by FIND_LEAK macro. */
GC_API int GC_quiet; /* Disable statistics output. Only matters if */
/* collector has been compiled with statistics */
/* enabled. This involves a performance cost, */
/* and is thus not the default. */
GC_API int GC_finalize_on_demand;
/* If nonzero, finalizers will only be run in */
/* response to an explicit GC_invoke_finalizers */
/* call. The default is determined by whether */
/* the FINALIZE_ON_DEMAND macro is defined */
/* when the collector is built. */
GC_API int GC_java_finalization;
/* Mark objects reachable from finalizable */
/* objects in a separate postpass. This makes */
/* it a bit safer to use non-topologically- */
/* ordered finalization. Default value is */
/* determined by JAVA_FINALIZATION macro. */
GC_API int GC_dont_gc; /* Don't collect unless explicitly requested, e.g. */
/* because it's not safe. */
GC_API int GC_dont_expand;
/* Don't expand heap unless explicitly requested */
/* or forced to. */
GC_API int GC_full_freq; /* Number of partial collections between */
/* full collections. Matters only if */
/* GC_incremental is set. */
GC_API GC_word GC_non_gc_bytes;
/* Bytes not considered candidates for collection. */
/* Used only to control scheduling of collections. */
GC_API GC_word GC_free_space_divisor;
/* We try to make sure that we allocate at */
/* least N/GC_free_space_divisor bytes between */
/* collections, where N is the heap size plus */
/* a rough estimate of the root set size. */
/* Initially, GC_free_space_divisor = 4. */
/* Increasing its value will use less space */
/* but more collection time. Decreasing it */
/* will appreciably decrease collection time */
/* at the expense of space. */
/* GC_free_space_divisor = 1 will effectively */
/* disable collections. */
GC_API GC_word GC_max_retries;
/* The maximum number of GCs attempted before */
/* reporting out of memory after heap */
/* expansion fails. Initially 0. */
GC_API char *GC_stackbottom; /* Cold end of user stack. */
/* May be set in the client prior to */
/* calling any GC_ routines. This */
/* avoids some overhead, and */
/* potentially some signals that can */
/* confuse debuggers. Otherwise the */
/* collector attempts to set it */
/* automatically. */
/* For multithreaded code, this is the */
/* cold end of the stack for the */
/* primordial thread. */
/* Public procedures */
/*
* general purpose allocation routines, with roughly malloc calling conv.
* The atomic versions promise that no relevant pointers are contained
* in the object. The nonatomic versions guarantee that the new object
* is cleared. GC_malloc_stubborn promises that no changes to the object
* will occur after GC_end_stubborn_change has been called on the
* result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
* that is scanned for pointers to collectable objects, but is not itself
* collectable. GC_malloc_uncollectable and GC_free called on the resulting
* object implicitly update GC_non_gc_bytes appropriately.
*/
GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
/* The following is only defined if the library has been suitably */
/* compiled: */
GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));
/* Explicitly deallocate an object. Dangerous if used incorrectly. */
/* Requires a pointer to the base of an object. */
/* If the argument is stubborn, it should not be changeable when freed. */
/* An object should not be enabled for finalization when it is */
/* explicitly deallocated. */
/* GC_free(0) is a no-op, as required by ANSI C for free. */
GC_API void GC_free GC_PROTO((GC_PTR object_addr));
/*
* Stubborn objects may be changed only if the collector is explicitly informed.
* The collector is implicitly informed of coming change when such
* an object is first allocated. The following routines inform the
* collector that an object will no longer be changed, or that it will
* once again be changed. Only nonNIL pointer stores into the object
* are considered to be changes. The argument to GC_end_stubborn_change
* must be exactly the value returned by GC_malloc_stubborn or passed to
* GC_change_stubborn. (In the second case it may be an interior pointer
* within 512 bytes of the beginning of the object.)
* There is a performance penalty for allowing more than
* one stubborn object to be changed at once, but it is acceptable to
* do so. The same applies to dropping stubborn objects that are still
* changeable.
*/
GC_API void GC_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
/* Return a pointer to the base (lowest address) of an object given */
/* a pointer to a location within the object. */
/* Return 0 if displaced_pointer doesn't point to within a valid */
/* object. */
GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));
/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally */
/* requested. */
GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));
/* For compatibility with C library. This is occasionally faster than */
/* a malloc followed by a bcopy. But if you rely on that, either here */
/* or with the standard C library, your code is broken. In my */
/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
/* The resulting object has the same kind as the original. */
/* If the argument is stubborn, the result will have changes enabled. */
/* It is an error to have changes enabled for the original object. */
/* Follows ANSI conventions for NULL old_object. */
GC_API GC_PTR GC_realloc
GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes));
/* Explicitly increase the heap size. */
/* Returns 0 on failure, 1 on success. */
GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes));
/* Limit the heap size to n bytes. Useful when you're debugging, */
/* especially on systems that don't handle running out of memory well. */
/* n == 0 ==> unbounded. This is the default. */
GC_API void GC_set_max_heap_size GC_PROTO((GC_word n));
/* Inform the collector that a certain section of statically allocated */
/* memory contains no pointers to garbage collected memory. Thus it */
/* need not be scanned. This is sometimes important if the application */
/* maps large read/write files into the address space, which could be */
/* mistaken for dynamic library data segments on some systems. */
GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish));
/* Clear the set of root segments. Wizards only. */
GC_API void GC_clear_roots GC_PROTO((void));
/* Add a root segment. Wizards only. */
GC_API void GC_add_roots GC_PROTO((char * low_address,
char * high_address_plus_1));
/* Add a displacement to the set of those considered valid by the */
/* collector. GC_register_displacement(n) means that if p was returned */
/* by GC_malloc, then (char *)p + n will be considered to be a valid */
/* pointer to p. N must be small and less than the size of p. */
/* (All pointers to the interior of objects from the stack are */
/* considered valid in any case. This applies to heap objects and */
/* static data.) */
/* Preferably, this should be called before any other GC procedures. */
/* Calling it later adds to the probability of excess memory */
/* retention. */
/* This is a no-op if the collector was compiled with recognition of */
/* arbitrary interior pointers enabled, which is now the default. */
GC_API void GC_register_displacement GC_PROTO((GC_word n));
/* The following version should be used if any debugging allocation is */
/* being done. */
GC_API void GC_debug_register_displacement GC_PROTO((GC_word n));
/* Explicitly trigger a full, world-stop collection. */
GC_API void GC_gcollect GC_PROTO((void));
/* Trigger a full world-stopped collection. Abort the collection if */
/* and when stop_func returns a nonzero value. Stop_func will be */
/* called frequently, and should be reasonably fast. This works even */
/* if virtual dirty bits, and hence incremental collection, are not */
/* available for this architecture. Collections can be aborted faster */
/* than normal pause times for incremental collection. However, */
/* aborted collections do no useful work; the next collection needs */
/* to start from the beginning. */
/* Return 0 if the collection was aborted, 1 if it succeeded. */
typedef int (* GC_stop_func) GC_PROTO((void));
GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
/* Return the number of bytes in the heap. Excludes collector private */
/* data structures. Includes empty blocks and fragmentation loss. */
/* Includes some pages that were allocated but never written. */
GC_API size_t GC_get_heap_size GC_PROTO((void));
/* Return the number of bytes allocated since the last collection. */
GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));
/* Enable incremental/generational collection. */
/* Not advisable unless dirty bits are */
/* available or most heap objects are */
/* pointer-free (atomic) or immutable. */
/* Don't use in leak finding mode. */
/* Ignored if GC_dont_gc is true. */
GC_API void GC_enable_incremental GC_PROTO((void));
/* Perform some garbage collection work, if appropriate. */
/* Return 0 if there is no more work to be done. */
/* Typically performs an amount of work corresponding roughly */
/* to marking from one page. May do more work if further */
/* progress requires it, e.g. if incremental collection is */
/* disabled. It is reasonable to call this in a wait loop */
/* until it returns 0. */
GC_API int GC_collect_a_little GC_PROTO((void));
/* Allocate an object of size lb bytes. The client guarantees that */
/* as long as the object is live, it will be referenced by a pointer */
/* that points to somewhere within the first 256 bytes of the object. */
/* (This should normally be declared volatile to prevent the compiler */
/* from invalidating this assertion.) This routine is only useful */
/* if a large array is being allocated. It reduces the chance of */
/* accidentally retaining such an array as a result of scanning an */
/* integer that happens to be an address inside the array. (Actually, */
/* it reduces the chance of the allocator not finding space for such */
/* an array, since it will try hard to avoid introducing such a false */
/* reference.) On a SunOS 4.X or MS Windows system this is recommended */
/* for arrays likely to be larger than 100K or so. For other systems, */
/* or if the collector is not configured to recognize all interior */
/* pointers, the threshold is normally much higher. */
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
# define GC_ADD_CALLER
# define GC_RETURN_ADDR (GC_word)__return_address
#endif
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
#else
# define GC_EXTRAS __FILE__, __LINE__
# define GC_EXTRA_PARAMS char * descr_string, int descr_int
#endif
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
GC_EXTRAS)
# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
GC_general_register_disappearing_link(link, GC_base(obj))
# define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
# else
# define GC_MALLOC(sz) GC_malloc(sz)
# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
# define GC_REALLOC(old, sz) GC_realloc(old, sz)
# define GC_FREE(p) GC_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_register_finalizer_ignore_self(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
GC_general_register_disappearing_link(link, obj)
# define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
# endif
/* The following are included because they are often convenient, and */
/* reduce the chance for a misspecified size argument. But calls may */
/* expand to something syntactically incorrect if t is a complicated */
/* type expression. */
# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
/* Finalization. Some of these primitives are grossly unsafe. */
/* The idea is to make them both cheap, and sufficient to build */
/* a safer layer, closer to PCedar finalization. */
/* The interface represents my conclusions from a long discussion */
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
typedef void (*GC_finalization_proc)
GC_PROTO((GC_PTR obj, GC_PTR client_data));
GC_API void GC_register_finalizer
GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer
GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
GC_finalization_proc *ofn, GC_PTR *ocd));
/* When obj is no longer accessible, invoke */
/* (*fn)(obj, cd). If a and b are inaccessible, and */
/* a points to b (after disappearing links have been */
/* made to disappear), then only a will be */
/* finalized. (If this does not create any new */
/* pointers to b, then b will be finalized after the */
/* next collection.) Any finalizable object that */
/* is reachable from itself by following one or more */
/* pointers will not be finalized (or collected). */
/* Thus cycles involving finalizable objects should */
/* be avoided, or broken by disappearing links. */
/* All but the last finalizer registered for an object */
/* are ignored. */
/* Finalization may be removed by passing 0 as fn. */
/* Finalizers are implicitly unregistered just before */
/* they are invoked. */
/* The old finalizer and client data are stored in */
/* *ofn and *ocd. */
/* Fn is never invoked on an accessible object, */
/* provided hidden pointers are converted to real */
/* pointers only if the allocation lock is held, and */
/* such conversions are not performed by finalization */
/* routines. */
/* If GC_register_finalizer is aborted as a result of */
/* a signal, the object may be left with no */
/* finalization, even if neither the old nor new */
/* finalizer were NULL. */
/* Obj should be the nonNULL starting address of an */
/* object allocated by GC_malloc or friends. */
/* Note that any garbage collectable object referenced */
/* by cd will be considered accessible until the */
/* finalizer is invoked. */
/* Another version of the above follows. It ignores */
/* self-cycles, i.e. pointers from a finalizable object to */
/* itself. There is a stylistic argument that this is wrong, */
/* but it's unavoidable for C++, since the compiler may */
/* silently introduce these. It's also benign in that specific */
/* case. */
GC_API void GC_register_finalizer_ignore_self
GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_ignore_self
GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
GC_finalization_proc *ofn, GC_PTR *ocd));
/* The following routine may be used to break cycles between */
/* finalizable objects, thus causing cyclic finalizable */
/* objects to be finalized in the correct order. Standard */
/* use involves calling GC_register_disappearing_link(&p), */
/* where p is a pointer that is not followed by finalization */
/* code, and should not be considered in determining */
/* finalization order. */
GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* Link should point to a field of a heap allocated */
/* object obj. *link will be cleared when obj is */
/* found to be inaccessible. This happens BEFORE any */
/* finalization code is invoked, and BEFORE any */
/* decisions about finalization order are made. */
/* This is useful in telling the finalizer that */
/* some pointers are not essential for proper */
/* finalization. This may avoid finalization cycles. */
/* Note that obj may be resurrected by another */
/* finalizer, and thus the clearing of *link may */
/* be visible to non-finalization code. */
/* There's an argument that an arbitrary action should */
/* be allowed here, instead of just clearing a pointer. */
/* But this causes problems if that action alters, or */
/* examines connectivity. */
/* Returns 1 if link was already registered, 0 */
/* otherwise. */
/* Only exists for backward compatibility. See below: */
GC_API int GC_general_register_disappearing_link
GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
/* A slight generalization of the above. *link is */
/* cleared when obj first becomes inaccessible. This */
/* can be used to implement weak pointers easily and */
/* safely. Typically link will point to a location */
/* holding a disguised pointer to obj. (A pointer */
/* inside an "atomic" object is effectively */
/* disguised.) In this way soft */
/* pointers are broken before any object */
/* reachable from them is finalized. Each link */
/* may be registered only once, i.e. with one obj */
/* value. This was added after a long email discussion */
/* with John Ellis. */
/* Obj must be a pointer to the first word of an object */
/* we allocated. It is unsafe to explicitly deallocate */
/* the object containing link. Explicitly deallocating */
/* obj may or may not cause link to eventually be */
/* cleared. */
GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* Returns 0 if link was not actually registered. */
/* Undoes a registration by either of the above two */
/* routines. */
/* Auxiliary fns to make finalization work correctly with displaced */
/* pointers introduced by the debugging allocators. */
GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
GC_API int GC_invoke_finalizers GC_PROTO((void));
/* Run finalizers for all objects that are ready to */
/* be finalized. Return the number of finalizers */
/* that were run. Normally this is also called */
/* implicitly during some allocations. If */
/* GC_finalize_on_demand is nonzero, it must be called */
/* explicitly. */
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer. */
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
/* Returns old warning procedure. */
/* The following is intended to be used by a higher level */
/* (e.g. cedar-like) finalization facility. It is expected */
/* that finalization code will arrange for hidden pointers to */
/* disappear. Otherwise objects can be accessed after they */
/* have been collected. */
/* Note that putting pointers in atomic objects or in */
/* nonpointer slots of "typed" objects is equivalent to */
/* disguising them in this way, and may have other advantages. */
# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
typedef GC_word GC_hidden_pointer;
# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
/* Converting a hidden pointer to a real pointer requires verifying */
/* that the object still exists. This involves acquiring the */
/* allocator lock to avoid a race with the collector. */
# endif /* I_HIDE_POINTERS */
typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
GC_API GC_PTR GC_call_with_alloc_lock
GC_PROTO((GC_fn_type fn, GC_PTR client_data));
/* Check that p and q point to the same object. */
/* Fail conspicuously if they don't. */
/* Returns the first argument. */
/* Succeeds if neither p nor q points to the heap. */
/* May succeed if both p and q point to locations between heap objects. */
GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));
/* Checked pointer pre- and post- increment operations. Note that */
/* the second argument is in units of bytes, not multiples of the */
/* object size. This should either be invoked from a macro, or the */
/* call should be automatically generated. */
GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));
/* Check that p is visible */
/* to the collector as a possibly pointer containing location. */
/* If it isn't fail conspicuously. */
/* Returns the argument in all cases. May erroneously succeed */
/* in hard cases. (This is intended for debugging use with */
/* untyped allocations. The idea is that it should be possible, though */
/* slow, to add such a call to all indirect pointer stores.) */
/* Currently useless for multithreaded worlds. */
GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));
/* Check that if p is a pointer to a heap page, then it points to */
/* a valid displacement within a heap object. */
/* Fail conspicuously if this property does not hold. */
/* Uninteresting with ALL_INTERIOR_POINTERS. */
/* Always returns its argument. */
GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
/* Safer, but slow, pointer addition. Probably useful mainly with */
/* a preprocessor. Useful only for heap pointers. */
#ifdef GC_DEBUG
# define GC_PTR_ADD3(x, n, type_of_result) \
((type_of_result)GC_same_obj((x)+(n), (x)))
# define GC_PRE_INCR3(x, n, type_of_result) \
((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
# define GC_POST_INCR2(x, type_of_result) \
((type_of_result)GC_post_incr(&(x), sizeof(*x)))
# ifdef __GNUC__
# define GC_PTR_ADD(x, n) \
GC_PTR_ADD3(x, n, typeof(x))
# define GC_PRE_INCR(x, n) \
GC_PRE_INCR3(x, n, typeof(x))
# define GC_POST_INCR(x, n) \
GC_POST_INCR2(x, typeof(x))
# else
/* We can't do this right without typeof, which ANSI */
/* decided was not sufficiently useful. Repeatedly */
/* mentioning the arguments seems too dangerous to be */
/* useful. So does not casting the result. */
# define GC_PTR_ADD(x, n) ((x)+(n))
# endif
#else /* !GC_DEBUG */
# define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
# define GC_PTR_ADD(x, n) ((x)+(n))
# define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
# define GC_PRE_INCR(x, n) ((x) += (n))
# define GC_POST_INCR2(x, type_of_result) ((x)++)
# define GC_POST_INCR(x, n) ((x)++)
#endif
/* Safer assignment of a pointer to a nonstack location. */
#ifdef GC_DEBUG
# ifdef __STDC__
# define GC_PTR_STORE(p, q) \
(*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
# else
# define GC_PTR_STORE(p, q) \
(*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
# endif
#else /* !GC_DEBUG */
# define GC_PTR_STORE(p, q) *((p) = (q))
#endif
/* Functions called to report pointer checking errors */
GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q));
GC_API void (*GC_is_valid_displacement_print_proc)
GC_PROTO((GC_PTR p));
GC_API void (*GC_is_visible_print_proc)
GC_PROTO((GC_PTR p));
#if defined(_SOLARIS_PTHREADS) && !defined(SOLARIS_THREADS)
# define SOLARIS_THREADS
#endif
#ifdef SOLARIS_THREADS
/* We need to intercept calls to many of the threads primitives, so */
/* that we can locate thread stacks and stop the world. */
/* Note also that the collector cannot see thread specific data. */
/* Thread specific data should generally consist of pointers to */
/* uncollectable objects, which are deallocated using the destructor */
/* facility in thr_keycreate. */
# include <thread.h>
# include <signal.h>
int GC_thr_create(void *stack_base, size_t stack_size,
void *(*start_routine)(void *), void *arg, long flags,
thread_t *new_thread);
int GC_thr_join(thread_t wait_for, thread_t *departed, void **status);
int GC_thr_suspend(thread_t target_thread);
int GC_thr_continue(thread_t target_thread);
void * GC_dlopen(const char *path, int mode);
# ifdef _SOLARIS_PTHREADS
# include <pthread.h>
extern int GC_pthread_create(pthread_t *new_thread,
const pthread_attr_t *attr,
void * (*thread_execp)(void *), void *arg);
extern int GC_pthread_join(pthread_t wait_for, void **status);
# undef thread_t
# define pthread_join GC_pthread_join
# define pthread_create GC_pthread_create
#endif
# define thr_create GC_thr_create
# define thr_join GC_thr_join
# define thr_suspend GC_thr_suspend
# define thr_continue GC_thr_continue
# define dlopen GC_dlopen
# endif /* SOLARIS_THREADS */
#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
int GC_pthread_create(pthread_t *new_thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *), void *arg);
int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
int GC_pthread_join(pthread_t thread, void **retval);
# define pthread_create GC_pthread_create
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join
#endif /* IRIX_THREADS || LINUX_THREADS */
# if defined(PCR) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
defined(IRIX_THREADS) || defined(LINUX_THREADS) || \
defined(IRIX_JDK_THREADS)
/* Any flavor of threads except SRC_M3. */
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
/* lb must be large enough to hold the pointer field. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
/* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
#endif /* THREADS && !SRC_M3 */
/*
* If you are planning on putting
* the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
* from the statically loaded program section.
* This circumvents a Solaris 2.X (X<=4) linker bug.
*/
#if defined(sparc) || defined(__sparc)
# define GC_INIT() { extern end, etext; \
GC_noop(&end, &etext); }
#else
# if defined(__CYGWIN32__) && defined(GC_USE_DLL)
/*
* Similarly gnu-win32 DLLs need explicit initialization
*/
# define GC_INIT() { GC_add_roots(DATASTART, DATAEND); }
# else
# define GC_INIT()
# endif
#endif
#if (defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
|| defined(_WIN32)
/* Win32s may not free all resources on process exit. */
/* This explicitly deallocates the heap. */
GC_API void GC_win32_free_heap ();
#endif
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* _GC_H */
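
For orientation, here is a minimal hypothetical client of the core allocation and finalization interface above (the node type and the finalizer name are illustrative, not part of the library):

#include <stdio.h>
#include "gc.h"

struct node {
    struct node *next;
    int value;
};

/* Finalizer with the GC_finalization_proc signature; invoked at most once, */
/* some time after the object has become unreachable.                       */
static void note_collection(void *obj, void *client_data)
{
    (void)client_data;  /* unused */
    printf("node %d is being collected\n", ((struct node *)obj)->value);
}

int main(void)
{
    struct node *head = 0;
    int i;

    GC_INIT();   /* a no-op on most platforms; matters for some shared-library setups */
    for (i = 0; i < 10; i++) {
        struct node *n = GC_NEW(struct node);   /* cleared, pointer-containing object */
        n->value = i;
        n->next = head;
        head = n;
    }
    GC_REGISTER_FINALIZER(head, note_collection, 0, 0, 0);
    head = 0;        /* drop the only reference to the list */
    GC_gcollect();   /* explicit full collection; the finalizer may run now or later */
    printf("heap size: %lu bytes\n", (unsigned long)GC_get_heap_size());
    return 0;
}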

380
gc/include/gc_alloc.h Normal file

@@ -0,0 +1,380 @@
/*
* Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
//
// This is a C++ header file that is intended to replace the SGI STL
// alloc.h. This assumes SGI STL version < 3.0.
//
// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
// and -DALL_INTERIOR_POINTERS. We also recommend
// -DREDIRECT_MALLOC=GC_uncollectable_malloc.
//
// Some of this could be faster in the explicit deallocation case. In particular,
// we spend too much time clearing objects on the free lists. That could be avoided.
//
// This uses template classes with static members, and hence does not work
// with g++ 2.7.2 and earlier.
//
#include "gc.h"
#ifndef GC_ALLOC_H
#define GC_ALLOC_H
#define __ALLOC_H // Prevent inclusion of the default version. Ugly.
#define __SGI_STL_ALLOC_H
#define __SGI_STL_INTERNAL_ALLOC_H
#ifndef __ALLOC
# define __ALLOC alloc
#endif
#include <stddef.h>
#include <string.h>
// The following is just replicated from the conventional SGI alloc.h:
template<class T, class alloc>
class simple_alloc {
public:
static T *allocate(size_t n)
{ return 0 == n? 0 : (T*) alloc::allocate(n * sizeof (T)); }
static T *allocate(void)
{ return (T*) alloc::allocate(sizeof (T)); }
static void deallocate(T *p, size_t n)
{ if (0 != n) alloc::deallocate(p, n * sizeof (T)); }
static void deallocate(T *p)
{ alloc::deallocate(p, sizeof (T)); }
};
#include "gc.h"
// The following need to match collector data structures.
// We can't include gc_priv.h, since that pulls in way too much stuff.
// This should eventually be factored out into another include file.
extern "C" {
extern void ** const GC_objfreelist_ptr;
extern void ** const GC_aobjfreelist_ptr;
extern void ** const GC_uobjfreelist_ptr;
extern void ** const GC_auobjfreelist_ptr;
extern void GC_incr_words_allocd(size_t words);
extern void GC_incr_mem_freed(size_t words);
extern char * GC_generic_malloc_words_small(size_t word, int kind);
}
// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
// AUNCOLLECTABLE in gc_priv.h.
enum { GC_PTRFREE = 0, GC_NORMAL = 1, GC_UNCOLLECTABLE = 2,
GC_AUNCOLLECTABLE = 3 };
enum { GC_max_fast_bytes = 255 };
enum { GC_bytes_per_word = sizeof(char *) };
enum { GC_byte_alignment = 8 };
enum { GC_word_alignment = GC_byte_alignment/GC_bytes_per_word };
inline void * &GC_obj_link(void * p)
{ return *(void **)p; }
// Compute a number of words >= n+1 bytes.
// The +1 allows for pointers one past the end.
inline size_t GC_round_up(size_t n)
{
return ((n + GC_byte_alignment)/GC_byte_alignment)*GC_word_alignment;
}
// The same but don't allow for extra byte.
inline size_t GC_round_up_uncollectable(size_t n)
{
return ((n + GC_byte_alignment - 1)/GC_byte_alignment)*GC_word_alignment;
}
template <int dummy>
class GC_aux_template {
public:
// File local count of allocated words. Occasionally this is
// added into the global count. A separate count is necessary since the
// real one must be updated with a procedure call.
static size_t GC_words_recently_allocd;
// Same for uncollectable memory. Not yet reflected in either
// GC_words_recently_allocd or GC_non_gc_bytes.
static size_t GC_uncollectable_words_recently_allocd;
// Similar counter for explicitly deallocated memory.
static size_t GC_mem_recently_freed;
// Again for uncollectable memory.
static size_t GC_uncollectable_mem_recently_freed;
static void * GC_out_of_line_malloc(size_t nwords, int kind);
};
template <int dummy>
size_t GC_aux_template<dummy>::GC_words_recently_allocd = 0;
template <int dummy>
size_t GC_aux_template<dummy>::GC_uncollectable_words_recently_allocd = 0;
template <int dummy>
size_t GC_aux_template<dummy>::GC_mem_recently_freed = 0;
template <int dummy>
size_t GC_aux_template<dummy>::GC_uncollectable_mem_recently_freed = 0;
template <int dummy>
void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
{
GC_words_recently_allocd += GC_uncollectable_words_recently_allocd;
GC_non_gc_bytes +=
GC_bytes_per_word * GC_uncollectable_words_recently_allocd;
GC_uncollectable_words_recently_allocd = 0;
GC_mem_recently_freed += GC_uncollectable_mem_recently_freed;
GC_non_gc_bytes -=
GC_bytes_per_word * GC_uncollectable_mem_recently_freed;
GC_uncollectable_mem_recently_freed = 0;
GC_incr_words_allocd(GC_words_recently_allocd);
GC_words_recently_allocd = 0;
GC_incr_mem_freed(GC_mem_recently_freed);
GC_mem_recently_freed = 0;
return GC_generic_malloc_words_small(nwords, kind);
}
typedef GC_aux_template<0> GC_aux;
// A fast, single-threaded, garbage-collected allocator
// We assume the first word will be immediately overwritten.
// In this version, deallocation is not a noop, and explicit
// deallocation is likely to help performance.
template <int dummy>
class single_client_gc_alloc_template {
public:
static void * allocate(size_t n)
{
size_t nwords = GC_round_up(n);
void ** flh;
void * op;
if (n > GC_max_fast_bytes) return GC_malloc(n);
flh = GC_objfreelist_ptr + nwords;
if (0 == (op = *flh)) {
return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
}
*flh = GC_obj_link(op);
GC_aux::GC_words_recently_allocd += nwords;
return op;
}
static void * ptr_free_allocate(size_t n)
{
size_t nwords = GC_round_up(n);
void ** flh;
void * op;
if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
flh = GC_aobjfreelist_ptr + nwords;
if (0 == (op = *flh)) {
return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
}
*flh = GC_obj_link(op);
GC_aux::GC_words_recently_allocd += nwords;
return op;
}
static void deallocate(void *p, size_t n)
{
size_t nwords = GC_round_up(n);
void ** flh;
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
flh = GC_objfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
memset((char *)p + GC_bytes_per_word, 0,
GC_bytes_per_word * (nwords - 1));
*flh = p;
GC_aux::GC_mem_recently_freed += nwords;
}
}
static void ptr_free_deallocate(void *p, size_t n)
{
size_t nwords = GC_round_up(n);
void ** flh;
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
flh = GC_aobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_mem_recently_freed += nwords;
}
}
};
typedef single_client_gc_alloc_template<0> single_client_gc_alloc;
// Once more, for uncollectable objects.
template <int dummy>
class single_client_alloc_template {
public:
static void * allocate(size_t n)
{
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
void * op;
if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
flh = GC_uobjfreelist_ptr + nwords;
if (0 == (op = *flh)) {
return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
}
*flh = GC_obj_link(op);
GC_aux::GC_uncollectable_words_recently_allocd += nwords;
return op;
}
static void * ptr_free_allocate(size_t n)
{
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
void * op;
if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
flh = GC_auobjfreelist_ptr + nwords;
if (0 == (op = *flh)) {
return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
}
*flh = GC_obj_link(op);
GC_aux::GC_uncollectable_words_recently_allocd += nwords;
return op;
}
static void deallocate(void *p, size_t n)
{
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
flh = GC_uobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_uncollectable_mem_recently_freed += nwords;
}
}
static void ptr_free_deallocate(void *p, size_t n)
{
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
flh = GC_auobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_uncollectable_mem_recently_freed += nwords;
}
}
};
typedef single_client_alloc_template<0> single_client_alloc;
template < int dummy >
class gc_alloc_template {
public:
static void * allocate(size_t n) { return GC_malloc(n); }
static void * ptr_free_allocate(size_t n)
{ return GC_malloc_atomic(n); }
static void deallocate(void *, size_t) { }
static void ptr_free_deallocate(void *, size_t) { }
};
typedef gc_alloc_template < 0 > gc_alloc;
template < int dummy >
class alloc_template {
public:
static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
static void * ptr_free_allocate(size_t n)
{ return GC_malloc_atomic_uncollectable(n); }
static void deallocate(void *p, size_t) { GC_free(p); }
static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
};
typedef alloc_template < 0 > alloc;
#ifdef _SGI_SOURCE
// We want to specialize simple_alloc so that it does the right thing
// for all pointerfree types. At the moment there is no portable way to
// even approximate that. The following approximation should work for
// SGI compilers, and perhaps some others.
# define __GC_SPECIALIZE(T,alloc) \
class simple_alloc<T, alloc> { \
public: \
static T *allocate(size_t n) \
{ return 0 == n? 0 : \
(T*) alloc::ptr_free_allocate(n * sizeof (T)); } \
static T *allocate(void) \
{ return (T*) alloc::ptr_free_allocate(sizeof (T)); } \
static void deallocate(T *p, size_t n) \
{ if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
static void deallocate(T *p) \
{ alloc::ptr_free_deallocate(p, sizeof (T)); } \
};
__GC_SPECIALIZE(char, gc_alloc)
__GC_SPECIALIZE(int, gc_alloc)
__GC_SPECIALIZE(unsigned, gc_alloc)
__GC_SPECIALIZE(float, gc_alloc)
__GC_SPECIALIZE(double, gc_alloc)
__GC_SPECIALIZE(char, alloc)
__GC_SPECIALIZE(int, alloc)
__GC_SPECIALIZE(unsigned, alloc)
__GC_SPECIALIZE(float, alloc)
__GC_SPECIALIZE(double, alloc)
__GC_SPECIALIZE(char, single_client_gc_alloc)
__GC_SPECIALIZE(int, single_client_gc_alloc)
__GC_SPECIALIZE(unsigned, single_client_gc_alloc)
__GC_SPECIALIZE(float, single_client_gc_alloc)
__GC_SPECIALIZE(double, single_client_gc_alloc)
__GC_SPECIALIZE(char, single_client_alloc)
__GC_SPECIALIZE(int, single_client_alloc)
__GC_SPECIALIZE(unsigned, single_client_alloc)
__GC_SPECIALIZE(float, single_client_alloc)
__GC_SPECIALIZE(double, single_client_alloc)
#ifdef __STL_USE_STD_ALLOCATORS
???copy stuff from stl_alloc.h or remove it to a different file ???
#endif /* __STL_USE_STD_ALLOCATORS */
#endif /* _SGI_SOURCE */
#endif /* GC_ALLOC_H */
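To make the split between the collected and uncollectable allocator classes concrete, here is a minimal sketch of direct use; the sizes and the function name are arbitrary, and the include assumes this header is installed as gc_alloc.h.

#include "gc_alloc.h"

void alloc_demo()
{
    void * collected = gc_alloc::allocate(128);          // scanned, collected
    void * scratch   = gc_alloc::ptr_free_allocate(128); // pointer-free (atomic)
    void * table     = alloc::allocate(256);             // uncollectable, scanned

    gc_alloc::deallocate(collected, 128);        // no-op; the collector reclaims it
    gc_alloc::ptr_free_deallocate(scratch, 128); // also a no-op
    alloc::deallocate(table, 256);               // forwards to GC_free
}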

290
gc/include/gc_cpp.h Normal file
View File

@@ -0,0 +1,290 @@
#ifndef GC_CPP_H
#define GC_CPP_H
/****************************************************************************
Copyright (c) 1994 by Xerox Corporation. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
Permission is hereby granted to use or copy this program for any
purpose, provided the above notices are retained on all copies.
Permission to modify the code and to distribute modified code is
granted, provided the above notices are retained, and a notice that
the code was modified is included with the above copyright notice.
****************************************************************************
C++ Interface to the Boehm Collector
John R. Ellis and Jesse Hull
Last modified on Mon Jul 24 15:43:42 PDT 1995 by ellis
This interface provides access to the Boehm collector. It provides
basic facilities similar to those described in "Safe, Efficient
Garbage Collection for C++", by John R. Ellis and David L. Detlefs
(ftp.parc.xerox.com:/pub/ellis/gc).
All heap-allocated objects are either "collectable" or
"uncollectable". Programs must explicitly delete uncollectable
objects, whereas the garbage collector will automatically delete
collectable objects when it discovers them to be inaccessible.
Collectable objects may freely point at uncollectable objects and vice
versa.
Objects allocated with the built-in "::operator new" are uncollectable.
Objects derived from class "gc" are collectable. For example:
class A: public gc {...};
A* a = new A; // a is collectable.
Collectable instances of non-class types can be allocated using the GC
placement:
typedef int A[ 10 ];
A* a = new (GC) A;
Uncollectable instances of classes derived from "gc" can be allocated
using the NoGC placement:
class A: public gc {...};
A* a = new (NoGC) A; // a is uncollectable.
Both uncollectable and collectable objects can be explicitly deleted
with "delete", which invokes an object's destructors and frees its
storage immediately.
A collectable object may have a clean-up function, which will be
invoked when the collector discovers the object to be inaccessible.
An object derived from "gc_cleanup" or containing a member derived
from "gc_cleanup" has a default clean-up function that invokes the
object's destructors. Explicit clean-up functions may be specified as
an additional placement argument:
A* a = ::new (GC, MyCleanup) A;
An object is considered "accessible" by the collector if it can be
reached by a path of pointers from static variables, automatic
variables of active functions, or from some object with clean-up
enabled; pointers from an object to itself are ignored.
Thus, if objects A and B both have clean-up functions, and A points at
B, B is considered accessible. After A's clean-up is invoked and its
storage released, B will then become inaccessible and will have its
clean-up invoked. If A points at B and B points to A, forming a
cycle, then that's considered a storage leak, and neither will be
collectable. See the interface gc.h for low-level facilities for
handling such cycles of objects with clean-up.
The collector cannot guarantee that it will find all inaccessible
objects. In practice, it finds almost all of them.
Cautions:
1. Be sure the collector has been augmented with "make c++".
2. If your compiler supports the new "operator new[]" syntax, then
add -DOPERATOR_NEW_ARRAY to the Makefile.
If your compiler doesn't support "operator new[]", beware that an
array of type T, where T is derived from "gc", may or may not be
allocated as a collectable object (it depends on the compiler). Use
the explicit GC placement to make the array collectable. For example:
class A: public gc {...};
A* a1 = new A[ 10 ]; // collectable or uncollectable?
A* a2 = new (GC) A[ 10 ]; // collectable
3. The destructors of collectable arrays of objects derived from
"gc_cleanup" will not be invoked properly. For example:
class A: public gc_cleanup {...};
A* a = new (GC) A[ 10 ]; // destructors not invoked correctly
Typically, only the destructor for the first element of the array will
be invoked when the array is garbage-collected. To get all the
destructors of any array executed, you must supply an explicit
clean-up function:
A* a = new (GC, MyCleanUp) A[ 10 ];
(Implementing clean-up of arrays correctly, portably, and in a way
that preserves the correct exception semantics requires a language
extension, e.g. the "gc" keyword.)
4. Compiler bugs:
* Solaris 2's CC (SC3.0) doesn't implement t->~T() correctly, so the
destructors of classes derived from gc_cleanup won't be invoked.
You'll have to explicitly register a clean-up function with
new-placement syntax.
* Evidently cfront 3.0 does not allow destructors to be explicitly
invoked using the ANSI-conforming syntax t->~T(). If you're using
cfront 3.0, you'll have to comment out the class gc_cleanup, which
uses explicit invocation.
****************************************************************************/
#include "gc.h"
#ifndef THINK_CPLUS
#define _cdecl
#endif
#if ! defined( OPERATOR_NEW_ARRAY ) \
&& (__BORLANDC__ >= 0x450 || (__GNUC__ >= 2 && __GNUC_MINOR__ >= 6) \
|| __WATCOMC__ >= 1050)
# define OPERATOR_NEW_ARRAY
#endif
enum GCPlacement {GC, NoGC, PointerFreeGC};
class gc {public:
inline void* operator new( size_t size );
inline void* operator new( size_t size, GCPlacement gcp );
inline void operator delete( void* obj );
#ifdef OPERATOR_NEW_ARRAY
inline void* operator new[]( size_t size );
inline void* operator new[]( size_t size, GCPlacement gcp );
inline void operator delete[]( void* obj );
#endif /* OPERATOR_NEW_ARRAY */
};
/*
Instances of classes derived from "gc" will be allocated in the
collected heap by default, unless an explicit NoGC placement is
specified. */
class gc_cleanup: virtual public gc {public:
inline gc_cleanup();
inline virtual ~gc_cleanup();
private:
inline static void _cdecl cleanup( void* obj, void* clientData );};
/*
Instances of classes derived from "gc_cleanup" will be allocated
in the collected heap by default. When the collector discovers an
inaccessible object derived from "gc_cleanup" or containing a
member derived from "gc_cleanup", its destructors will be
invoked. */
extern "C" {typedef void (*GCCleanUpFunc)( void* obj, void* clientData );}
inline void* operator new(
size_t size,
GCPlacement gcp,
GCCleanUpFunc cleanup = 0,
void* clientData = 0 );
/*
Allocates a collectable or uncollected object, according to the
value of "gcp".
For collectable objects, if "cleanup" is non-null, then when the
allocated object "obj" becomes inaccessible, the collector will
invoke the function "cleanup( obj, clientData )" but will not
invoke the object's destructors. It is an error to explicitly
delete an object allocated with a non-null "cleanup".
It is an error to specify a non-null "cleanup" with NoGC or for
classes derived from "gc_cleanup" or containing members derived
from "gc_cleanup". */
#ifdef OPERATOR_NEW_ARRAY
inline void* operator new[](
size_t size,
GCPlacement gcp,
GCCleanUpFunc cleanup = 0,
void* clientData = 0 );
/*
The operator new for arrays, identical to the above. */
#endif /* OPERATOR_NEW_ARRAY */
/****************************************************************************
Inline implementation
****************************************************************************/
inline void* gc::operator new( size_t size ) {
return GC_MALLOC( size );}
inline void* gc::operator new( size_t size, GCPlacement gcp ) {
if (gcp == GC)
return GC_MALLOC( size );
else if (gcp == PointerFreeGC)
return GC_MALLOC_ATOMIC( size );
else
return GC_MALLOC_UNCOLLECTABLE( size );}
inline void gc::operator delete( void* obj ) {
GC_FREE( obj );}
#ifdef OPERATOR_NEW_ARRAY
inline void* gc::operator new[]( size_t size ) {
return gc::operator new( size );}
inline void* gc::operator new[]( size_t size, GCPlacement gcp ) {
return gc::operator new( size, gcp );}
inline void gc::operator delete[]( void* obj ) {
gc::operator delete( obj );}
#endif /* OPERATOR_NEW_ARRAY */
inline gc_cleanup::~gc_cleanup() {
GC_REGISTER_FINALIZER_IGNORE_SELF( GC_base(this), 0, 0, 0, 0 );}
inline void gc_cleanup::cleanup( void* obj, void* displ ) {
((gc_cleanup*) ((char*) obj + (ptrdiff_t) displ))->~gc_cleanup();}
inline gc_cleanup::gc_cleanup() {
GC_finalization_proc oldProc;
void* oldData;
void* base = GC_base( (void *) this );
if (0 == base) return;
GC_REGISTER_FINALIZER_IGNORE_SELF(
base, cleanup, (void*) ((char*) this - (char*) base),
&oldProc, &oldData );
if (0 != oldProc) {
GC_REGISTER_FINALIZER_IGNORE_SELF( base, oldProc, oldData, 0, 0 );}}
inline void* operator new(
size_t size,
GCPlacement gcp,
GCCleanUpFunc cleanup,
void* clientData )
{
void* obj;
if (gcp == GC) {
obj = GC_MALLOC( size );
if (cleanup != 0)
GC_REGISTER_FINALIZER_IGNORE_SELF(
obj, cleanup, clientData, 0, 0 );}
else if (gcp == PointerFreeGC) {
obj = GC_MALLOC_ATOMIC( size );}
else {
obj = GC_MALLOC_UNCOLLECTABLE( size );};
return obj;}
#ifdef OPERATOR_NEW_ARRAY
inline void* operator new[](
size_t size,
GCPlacement gcp,
GCCleanUpFunc cleanup,
void* clientData )
{
return ::operator new( size, gcp, cleanup, clientData );}
#endif /* OPERATOR_NEW_ARRAY */
#endif /* GC_CPP_H */
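A compact end-to-end sketch of the interface documented above; the class names and the clean-up function are illustrative, and it assumes the collector was built with "make c++" as cautioned earlier.

#include "gc_cpp.h"
#include <stdio.h>

class Point: public gc {public:           // collectable, no clean-up
    int x, y;};

class Handle: public gc_cleanup {public:  // destructor runs as clean-up
    ~Handle() { printf( "Handle cleaned up\n" ); }};

static void my_cleanup( void* obj, void* clientData ) {
    printf( "explicit clean-up of %p\n", obj );}

int main() {
    Point* p = new Point;                     // collectable
    Point* q = new (NoGC) Point;              // uncollectable; must be deleted
    Point* r = ::new (GC, my_cleanup) Point;  // collectable, explicit clean-up
    Handle* h = new Handle;                   // cleaned up via its destructor
    delete q;                                 // collectable objects need no delete
    GC_gcollect();                            // a collection may run clean-ups
    return 0;}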

103
gc/include/gc_inl.h Normal file
View File

@@ -0,0 +1,103 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Boehm, October 3, 1995 2:07 pm PDT */
# ifndef GC_PRIVATE_H
# include "private/gc_priv.h"
# endif
/* USE OF THIS FILE IS NOT RECOMMENDED unless the collector has been */
/* compiled without -DALL_INTERIOR_POINTERS or with */
/* -DDONT_ADD_BYTE_AT_END, or the specified size includes a pointerfree */
/* word at the end. In the standard collector configuration, */
/* the final word of each object may not be scanned. */
/* This is most useful for compilers that generate C. */
/* Manual use is hereby discouraged. */
/* Allocate n words (NOT BYTES). X is made to point to the result. */
/* It is assumed that n < MAXOBJSZ, and */
/* that n > 0. On machines requiring double word alignment of some */
/* data, we also assume that n is 1 or even. This bypasses the */
/* MERGE_SIZES mechanism. In order to minimize the number of distinct */
/* free lists that are maintained, the caller should ensure that a */
/* small number of distinct values of n are used. (The MERGE_SIZES */
/* mechanism normally does this by ensuring that only the leading three */
/* bits of n may be nonzero. See misc.c for details.) We really */
/* recommend this only in cases in which n is a constant, and no */
/* locking is required. */
/* In that case it may allow the compiler to perform substantial */
/* additional optimizations. */
# define GC_MALLOC_WORDS(result,n) \
{ \
register ptr_t op; \
register ptr_t *opp; \
DCL_LOCK_STATE; \
\
opp = &(GC_objfreelist[n]); \
FASTLOCK(); \
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
FASTUNLOCK(); \
(result) = GC_generic_malloc_words_small((n), NORMAL); \
} else { \
*opp = obj_link(op); \
obj_link(op) = 0; \
GC_words_allocd += (n); \
FASTUNLOCK(); \
(result) = (GC_PTR) op; \
} \
}
/* The same for atomic objects: */
# define GC_MALLOC_ATOMIC_WORDS(result,n) \
{ \
register ptr_t op; \
register ptr_t *opp; \
DCL_LOCK_STATE; \
\
opp = &(GC_aobjfreelist[n]); \
FASTLOCK(); \
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
FASTUNLOCK(); \
(result) = GC_generic_malloc_words_small((n), PTRFREE); \
} else { \
*opp = obj_link(op); \
obj_link(op) = 0; \
GC_words_allocd += (n); \
FASTUNLOCK(); \
(result) = (GC_PTR) op; \
} \
}
/* And once more for two word initialized objects: */
# define GC_CONS(result, first, second) \
{ \
register ptr_t op; \
register ptr_t *opp; \
DCL_LOCK_STATE; \
\
opp = &(GC_objfreelist[2]); \
FASTLOCK(); \
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
FASTUNLOCK(); \
op = GC_generic_malloc_words_small(2, NORMAL); \
} else { \
*opp = obj_link(op); \
GC_words_allocd += 2; \
FASTUNLOCK(); \
} \
((word *)op)[0] = (word)(first); \
((word *)op)[1] = (word)(second); \
(result) = (GC_PTR) op; \
}
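A hedged sketch of the intended use of these macros, assuming the configuration caveats above are satisfied and that the collector's private headers are visible to the client; the function names are illustrative.

# include "gc_inl.h"

GC_PTR example_cons(GC_PTR head, GC_PTR tail)
{
    GC_PTR result;

    GC_CONS(result, head, tail);   /* two words, initialized to head and tail */
    return result;
}

GC_PTR example_four_words(void)
{
    GC_PTR result;

    GC_MALLOC_WORDS(result, 4);    /* 4 words (not bytes), pointer-containing */
    return result;
}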

1
gc/include/gc_inline.h Normal file
View File

@@ -0,0 +1 @@
# include "gc_inl.h"

91
gc/include/gc_typed.h Normal file
View File

@@ -0,0 +1,91 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright 1996 Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/*
* Some simple primitives for allocation with explicit type information.
* Facilities for dynamic type inference may be added later.
* Should be used only for extremely performance critical applications,
* or if conservative collector leakage is otherwise a problem (unlikely).
* Note that this is implemented completely separately from the rest
* of the collector, and is not linked in unless referenced.
* This does not currently support GC_DEBUG in any interesting way.
*/
/* Boehm, May 19, 1994 2:13 pm PDT */
#ifndef _GC_TYPED_H
# define _GC_TYPED_H
# ifndef _GC_H
# include "gc.h"
# endif
typedef GC_word * GC_bitmap;
/* The least significant bit of the first word is one if */
/* the first word in the object may be a pointer. */
# define GC_get_bit(bm, index) \
(((bm)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
# define GC_set_bit(bm, index) \
(bm)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
typedef GC_word GC_descr;
GC_API GC_descr GC_make_descriptor GC_PROTO((GC_bitmap bm, size_t len));
/* Return a type descriptor for the object whose layout */
/* is described by the argument. */
/* The least significant bit of the first word is one */
/* if the first word in the object may be a pointer. */
/* The second argument specifies the number of */
/* meaningful bits in the bitmap. The actual object */
/* may be larger (but not smaller). Any additional */
/* words in the object are assumed not to contain */
/* pointers. */
/* Returns a conservative approximation in the */
/* (unlikely) case of insufficient memory to build */
/* the descriptor. Calls to GC_make_descriptor */
/* may consume some amount of a finite resource. This */
/* is intended to be called once per type, not once */
/* per allocation. */
GC_API GC_PTR GC_malloc_explicitly_typed
GC_PROTO((size_t size_in_bytes, GC_descr d));
/* Allocate an object whose layout is described by d. */
/* The resulting object MAY NOT BE PASSED TO REALLOC. */
GC_API GC_PTR GC_malloc_explicitly_typed_ignore_off_page
GC_PROTO((size_t size_in_bytes, GC_descr d));
GC_API GC_PTR GC_calloc_explicitly_typed
GC_PROTO((size_t nelements,
size_t element_size_in_bytes,
GC_descr d));
/* Allocate an array of nelements elements, each of the */
/* given size, and with the given descriptor. */
/* The element size must be a multiple of the byte */
/* alignment required for pointers. E.g. on a 32-bit */
/* machine with 16-bit aligned pointers, size_in_bytes */
/* must be a multiple of 2. */
#ifdef GC_DEBUG
# define GC_MALLOC_EXPLICTLY_TYPED(bytes, d) GC_MALLOC(bytes)
# define GC_CALLOC_EXPLICTLY_TYPED(n, bytes, d) GC_MALLOC(n*bytes)
#else
# define GC_MALLOC_EXPLICTLY_TYPED(bytes, d) \
GC_malloc_explicitly_typed(bytes, d)
# define GC_CALLOC_EXPLICTLY_TYPED(n, bytes, d) \
GC_calloc_explicitly_typed(n, bytes, d)
#endif /* !GC_DEBUG */
#endif /* _GC_TYPED_H */
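As a worked example of the descriptor interface, a minimal sketch follows; the struct layout and the bitmap value are assumptions about a conventional layout (first word a pointer, second word not), not part of this header.

# include "gc_typed.h"

struct example_node {
    struct example_node * next;   /* word 0: may be a pointer */
    GC_word datum;                /* word 1: never a pointer  */
};

GC_descr example_node_descr;      /* built once, reused for every allocation */

void example_init_descr(void)
{
    GC_word bitmap[1];

    bitmap[0] = 0x1;              /* bit 0 set: first word may be a pointer */
    example_node_descr = GC_make_descriptor(bitmap, 2);
}

struct example_node * example_new_node(struct example_node * next, GC_word datum)
{
    struct example_node * result = (struct example_node *)
        GC_malloc_explicitly_typed(sizeof(struct example_node),
                                   example_node_descr);

    result -> next = next;
    result -> datum = datum;
    return result;
}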

41
gc/include/javaxfc.h Normal file
View File

@@ -0,0 +1,41 @@
# ifndef GC_H
# include "gc.h"
# endif
/*
* Invoke all remaining finalizers that haven't yet been run.
* This is needed for strict compliance with the Java standard,
 * so that the runtime can guarantee that all finalizers are run.
* This is problematic for several reasons:
 * 1) It means that finalizers, and all methods called by them,
* must be prepared to deal with objects that have been finalized in
* spite of the fact that they are still referenced by statically
* allocated pointer variables.
 * 2) It may mean that we get stuck in an infinite loop running
* finalizers which create new finalizable objects, though that's
* probably unlikely.
* Thus this is not recommended for general use.
*/
void GC_finalize_all();
/*
* A version of GC_register_finalizer that allows the object to be
* finalized before the objects it references. This is again error
* prone, in that it makes it easy to accidentally reference finalized
* objects. Again, recommended only for JVM implementors.
*/
void GC_register_finalizer_no_order(GC_PTR obj,
GC_finalization_proc fn, GC_PTR cd,
GC_finalization_proc *ofn, GC_PTR * ocd);
void GC_debug_register_finalizer_no_order(GC_PTR obj,
GC_finalization_proc fn, GC_PTR cd,
GC_finalization_proc *ofn, GC_PTR * ocd);
#ifdef GC_DEBUG
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_debug_register_finalizer_no_order(p, f, d, of, od)
#else
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_register_finalizer_no_order(p, f, d, of, od)
#endif
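A brief sketch of the intended JVM-side usage; the finalizer and the function names are illustrative, and GC_gcollect comes from gc.h.

# include "javaxfc.h"

static void example_finalizer(GC_PTR obj, GC_PTR client_data)
{
    /* release any external resources held by obj */
}

void example_register(GC_PTR obj)
{
    /* May run before finalizers of objects obj references; see caveat above. */
    GC_register_finalizer_no_order(obj, example_finalizer, 0, 0, 0);
}

void example_vm_exit(void)
{
    GC_gcollect();        /* discover unreachable objects                    */
    GC_finalize_all();    /* then force any finalizers that are still queued */
}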

View File

@@ -0,0 +1,7 @@
#define GC_DEBUG
#include "gc.h"
#define malloc(n) GC_MALLOC(n)
#define calloc(m,n) GC_MALLOC(m*n)
#define free(p) GC_FREE(p)
#define realloc(p,n) GC_REALLOC(p,n)
#define CHECK_LEAKS() GC_gcollect()
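A short sketch of leak checking with these redirections; the header shown above must be included before any allocation calls (its path is not shown here), and reporting assumes a collector built for leak detection (e.g. with -DFIND_LEAK).

#include <stdio.h>
/* include the redirection header defined above before any malloc/free use */

int main(void)
{
    char * kept = (char *)malloc(100);   /* expands to GC_MALLOC(100)          */
    char * lost = (char *)malloc(50);    /* allocated, then the only ref drops */

    lost = 0;
    CHECK_LEAKS();                       /* forces a collection so unreachable */
                                         /* but unfreed blocks can be reported */
    free(kept);                          /* expands to GC_FREE(kept)           */
    printf("done\n");
    return 0;
}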

456
gc/include/new_gc_alloc.h Normal file
View File

@@ -0,0 +1,456 @@
/*
* Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
//
// This is a revision of gc_alloc.h for SGI STL versions > 3.0
// Unlike earlier versions, it supplements the standard "alloc.h"
// instead of replacing it.
//
// This is sloppy about variable names used in header files.
// It also doesn't yet understand the new header file names or
// namespaces.
//
// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
// and -DALL_INTERIOR_POINTERS. We also recommend
// -DREDIRECT_MALLOC=GC_uncollectable_malloc.
//
// Some of this could be faster in the explicit deallocation case.
// In particular, we spend too much time clearing objects on the
// free lists. That could be avoided.
//
// This uses template classes with static members, and hence does not work
// with g++ 2.7.2 and earlier.
//
// Unlike its predecessor, this one simply defines
// gc_alloc
// single_client_gc_alloc
// traceable_alloc
// single_client_traceable_alloc
//
// It does not redefine alloc. Nor does it change the default allocator,
// though the user may wish to do so. (The argument against changing
// the default allocator is that it may introduce subtle link compatibility
// problems. The argument for changing it is that the usual default
// allocator is usually a very bad choice for a garbage collected environment.)
//
#ifndef GC_ALLOC_H
#include "gc.h"
#include <alloc.h>
#define GC_ALLOC_H
#include <stddef.h>
#include <string.h>
// The following need to match collector data structures.
// We can't include gc_priv.h, since that pulls in way too much stuff.
// This should eventually be factored out into another include file.
extern "C" {
extern void ** const GC_objfreelist_ptr;
extern void ** const GC_aobjfreelist_ptr;
extern void ** const GC_uobjfreelist_ptr;
extern void ** const GC_auobjfreelist_ptr;
extern void GC_incr_words_allocd(size_t words);
extern void GC_incr_mem_freed(size_t words);
extern char * GC_generic_malloc_words_small(size_t word, int kind);
}
// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
// AUNCOLLECTABLE in gc_priv.h.
enum { GC_PTRFREE = 0, GC_NORMAL = 1, GC_UNCOLLECTABLE = 2,
GC_AUNCOLLECTABLE = 3 };
enum { GC_max_fast_bytes = 255 };
enum { GC_bytes_per_word = sizeof(char *) };
enum { GC_byte_alignment = 8 };
enum { GC_word_alignment = GC_byte_alignment/GC_bytes_per_word };
inline void * &GC_obj_link(void * p)
{ return *(void **)p; }
// Compute a number of words >= n+1 bytes.
// The +1 allows for pointers one past the end.
inline size_t GC_round_up(size_t n)
{
return ((n + GC_byte_alignment)/GC_byte_alignment)*GC_word_alignment;
}
// The same but don't allow for extra byte.
inline size_t GC_round_up_uncollectable(size_t n)
{
return ((n + GC_byte_alignment - 1)/GC_byte_alignment)*GC_word_alignment;
}
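// Worked example, assuming GC_bytes_per_word == 4 (so GC_byte_alignment == 8
// and GC_word_alignment == 2): GC_round_up(8) = ((8 + 8)/8)*2 = 4 words
// (16 bytes), leaving room for the one-past-the-end pointer, while
// GC_round_up_uncollectable(8) = ((8 + 7)/8)*2 = 2 words (8 bytes).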
template <int dummy>
class GC_aux_template {
public:
// File local count of allocated words. Occasionally this is
// added into the global count. A separate count is necessary since the
// real one must be updated with a procedure call.
static size_t GC_words_recently_allocd;
        // Same for uncollectable memory. Not yet reflected in either
// GC_words_recently_allocd or GC_non_gc_bytes.
static size_t GC_uncollectable_words_recently_allocd;
// Similar counter for explicitly deallocated memory.
static size_t GC_mem_recently_freed;
// Again for uncollectable memory.
static size_t GC_uncollectable_mem_recently_freed;
static void * GC_out_of_line_malloc(size_t nwords, int kind);
};
template <int dummy>
size_t GC_aux_template<dummy>::GC_words_recently_allocd = 0;
template <int dummy>
size_t GC_aux_template<dummy>::GC_uncollectable_words_recently_allocd = 0;
template <int dummy>
size_t GC_aux_template<dummy>::GC_mem_recently_freed = 0;
template <int dummy>
size_t GC_aux_template<dummy>::GC_uncollectable_mem_recently_freed = 0;
template <int dummy>
void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
{
GC_words_recently_allocd += GC_uncollectable_words_recently_allocd;
GC_non_gc_bytes +=
GC_bytes_per_word * GC_uncollectable_words_recently_allocd;
GC_uncollectable_words_recently_allocd = 0;
GC_mem_recently_freed += GC_uncollectable_mem_recently_freed;
GC_non_gc_bytes -=
GC_bytes_per_word * GC_uncollectable_mem_recently_freed;
GC_uncollectable_mem_recently_freed = 0;
GC_incr_words_allocd(GC_words_recently_allocd);
GC_words_recently_allocd = 0;
GC_incr_mem_freed(GC_mem_recently_freed);
GC_mem_recently_freed = 0;
return GC_generic_malloc_words_small(nwords, kind);
}
typedef GC_aux_template<0> GC_aux;
// A fast, single-threaded, garbage-collected allocator
// We assume the first word will be immediately overwritten.
// In this version, deallocation is not a noop, and explicit
// deallocation is likely to help performance.
template <int dummy>
class single_client_gc_alloc_template {
public:
static void * allocate(size_t n)
{
size_t nwords = GC_round_up(n);
void ** flh;
void * op;
if (n > GC_max_fast_bytes) return GC_malloc(n);
flh = GC_objfreelist_ptr + nwords;
if (0 == (op = *flh)) {
return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
}
*flh = GC_obj_link(op);
GC_aux::GC_words_recently_allocd += nwords;
return op;
}
static void * ptr_free_allocate(size_t n)
{
size_t nwords = GC_round_up(n);
void ** flh;
void * op;
if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
flh = GC_aobjfreelist_ptr + nwords;
if (0 == (op = *flh)) {
return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
}
*flh = GC_obj_link(op);
GC_aux::GC_words_recently_allocd += nwords;
return op;
}
static void deallocate(void *p, size_t n)
{
size_t nwords = GC_round_up(n);
void ** flh;
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
flh = GC_objfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
memset((char *)p + GC_bytes_per_word, 0,
GC_bytes_per_word * (nwords - 1));
*flh = p;
GC_aux::GC_mem_recently_freed += nwords;
}
}
static void ptr_free_deallocate(void *p, size_t n)
{
size_t nwords = GC_round_up(n);
void ** flh;
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
flh = GC_aobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_mem_recently_freed += nwords;
}
}
};
typedef single_client_gc_alloc_template<0> single_client_gc_alloc;
// Once more, for uncollectable objects.
template <int dummy>
class single_client_traceable_alloc_template {
public:
static void * allocate(size_t n)
{
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
void * op;
if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
flh = GC_uobjfreelist_ptr + nwords;
if (0 == (op = *flh)) {
return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
}
*flh = GC_obj_link(op);
GC_aux::GC_uncollectable_words_recently_allocd += nwords;
return op;
}
static void * ptr_free_allocate(size_t n)
{
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
void * op;
if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
flh = GC_auobjfreelist_ptr + nwords;
if (0 == (op = *flh)) {
return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
}
*flh = GC_obj_link(op);
GC_aux::GC_uncollectable_words_recently_allocd += nwords;
return op;
}
static void deallocate(void *p, size_t n)
{
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
flh = GC_uobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_uncollectable_mem_recently_freed += nwords;
}
}
static void ptr_free_deallocate(void *p, size_t n)
{
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
if (n > GC_max_fast_bytes) {
GC_free(p);
} else {
flh = GC_auobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
GC_aux::GC_uncollectable_mem_recently_freed += nwords;
}
}
};
typedef single_client_traceable_alloc_template<0> single_client_traceable_alloc;
template < int dummy >
class gc_alloc_template {
public:
static void * allocate(size_t n) { return GC_malloc(n); }
static void * ptr_free_allocate(size_t n)
{ return GC_malloc_atomic(n); }
static void deallocate(void *, size_t) { }
static void ptr_free_deallocate(void *, size_t) { }
};
typedef gc_alloc_template < 0 > gc_alloc;
template < int dummy >
class traceable_alloc_template {
public:
static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
static void * ptr_free_allocate(size_t n)
{ return GC_malloc_atomic_uncollectable(n); }
static void deallocate(void *p, size_t) { GC_free(p); }
static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
};
typedef traceable_alloc_template < 0 > traceable_alloc;
#ifdef _SGI_SOURCE
// We want to specialize simple_alloc so that it does the right thing
// for all pointerfree types. At the moment there is no portable way to
// even approximate that. The following approximation should work for
// SGI compilers, and perhaps some others.
# define __GC_SPECIALIZE(T,alloc) \
class simple_alloc<T, alloc> { \
public: \
static T *allocate(size_t n) \
{ return 0 == n? 0 : \
(T*) alloc::ptr_free_allocate(n * sizeof (T)); } \
static T *allocate(void) \
{ return (T*) alloc::ptr_free_allocate(sizeof (T)); } \
static void deallocate(T *p, size_t n) \
{ if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
static void deallocate(T *p) \
{ alloc::ptr_free_deallocate(p, sizeof (T)); } \
};
__GC_SPECIALIZE(char, gc_alloc)
__GC_SPECIALIZE(int, gc_alloc)
__GC_SPECIALIZE(unsigned, gc_alloc)
__GC_SPECIALIZE(float, gc_alloc)
__GC_SPECIALIZE(double, gc_alloc)
__GC_SPECIALIZE(char, traceable_alloc)
__GC_SPECIALIZE(int, traceable_alloc)
__GC_SPECIALIZE(unsigned, traceable_alloc)
__GC_SPECIALIZE(float, traceable_alloc)
__GC_SPECIALIZE(double, traceable_alloc)
__GC_SPECIALIZE(char, single_client_gc_alloc)
__GC_SPECIALIZE(int, single_client_gc_alloc)
__GC_SPECIALIZE(unsigned, single_client_gc_alloc)
__GC_SPECIALIZE(float, single_client_gc_alloc)
__GC_SPECIALIZE(double, single_client_gc_alloc)
__GC_SPECIALIZE(char, single_client_traceable_alloc)
__GC_SPECIALIZE(int, single_client_traceable_alloc)
__GC_SPECIALIZE(unsigned, single_client_traceable_alloc)
__GC_SPECIALIZE(float, single_client_traceable_alloc)
__GC_SPECIALIZE(double, single_client_traceable_alloc)
#ifdef __STL_USE_STD_ALLOCATORS
__STL_BEGIN_NAMESPACE
template <class _T>
struct _Alloc_traits<_T, gc_alloc >
{
static const bool _S_instanceless = true;
typedef simple_alloc<_T, gc_alloc > _Alloc_type;
typedef __allocator<_T, gc_alloc > allocator_type;
};
inline bool operator==(const gc_alloc&,
const gc_alloc&)
{
return true;
}
inline bool operator!=(const gc_alloc&,
const gc_alloc&)
{
return false;
}
template <class _T>
struct _Alloc_traits<_T, single_client_gc_alloc >
{
static const bool _S_instanceless = true;
typedef simple_alloc<_T, single_client_gc_alloc > _Alloc_type;
typedef __allocator<_T, single_client_gc_alloc > allocator_type;
};
inline bool operator==(const single_client_gc_alloc&,
const single_client_gc_alloc&)
{
return true;
}
inline bool operator!=(const single_client_gc_alloc&,
const single_client_gc_alloc&)
{
return false;
}
template <class _T>
struct _Alloc_traits<_T, traceable_alloc >
{
static const bool _S_instanceless = true;
typedef simple_alloc<_T, traceable_alloc > _Alloc_type;
typedef __allocator<_T, traceable_alloc > allocator_type;
};
inline bool operator==(const traceable_alloc&,
const traceable_alloc&)
{
return true;
}
inline bool operator!=(const traceable_alloc&,
const traceable_alloc&)
{
return false;
}
template <class _T>
struct _Alloc_traits<_T, single_client_traceable_alloc >
{
static const bool _S_instanceless = true;
typedef simple_alloc<_T, single_client_traceable_alloc > _Alloc_type;
typedef __allocator<_T, single_client_traceable_alloc > allocator_type;
};
inline bool operator==(const single_client_traceable_alloc&,
const single_client_traceable_alloc&)
{
return true;
}
inline bool operator!=(const single_client_traceable_alloc&,
const single_client_traceable_alloc&)
{
return false;
}
__STL_END_NAMESPACE
#endif /* __STL_USE_STD_ALLOCATORS */
#endif /* _SGI_SOURCE */
#endif /* GC_ALLOC_H */
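Under an SGI-derived STL with __STL_USE_STD_ALLOCATORS, the _Alloc_traits specializations above are what let a container name one of these allocator classes directly. A hedged sketch, using the pre-standard header names and global namespace that this file itself assumes:

#include "new_gc_alloc.h"
#include <vector.h>       // pre-standard SGI header name

// Element storage for gc_int_vector comes from the collected heap via
// simple_alloc<int, gc_alloc>; traceable_alloc keeps it uncollectable.
typedef vector<int, gc_alloc>        gc_int_vector;
typedef vector<int, traceable_alloc> uncollectable_int_vector;

void example_fill(gc_int_vector & v)
{
    for (int i = 0; i < 100; i++) v.push_back(i);
}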

View File

@@ -0,0 +1,118 @@
/*
* Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Boehm, May 19, 1994 2:23 pm PDT */
# ifndef CORD_POSITION_H
/* The representation of CORD_position. This is private to the */
/* implementation, but the size is known to clients. Also */
/* the implementation of some exported macros relies on it. */
/* Don't use anything defined here and not in cord.h. */
# define MAX_DEPTH 48
/* The maximum depth of a balanced cord + 1. */
/* We don't let cords get deeper than MAX_DEPTH. */
struct CORD_pe {
CORD pe_cord;
size_t pe_start_pos;
};
/* A structure describing an entry on the path from the root */
/* to current position. */
typedef struct CORD_Pos {
size_t cur_pos;
int path_len;
# define CORD_POS_INVALID (0x55555555)
/* path_len == INVALID <==> position invalid */
const char *cur_leaf; /* Current leaf, if it is a string. */
/* If the current leaf is a function, */
/* then this may point to function_buf */
/* containing the next few characters. */
/* Always points to a valid string */
/* containing the current character */
/* unless cur_end is 0. */
size_t cur_start; /* Start position of cur_leaf */
size_t cur_end; /* Ending position of cur_leaf */
/* 0 if cur_leaf is invalid. */
struct CORD_pe path[MAX_DEPTH + 1];
/* path[path_len] is the leaf corresponding to cur_pos */
/* path[0].pe_cord is the cord we point to. */
# define FUNCTION_BUF_SZ 8
char function_buf[FUNCTION_BUF_SZ]; /* Space for next few chars */
/* from function node. */
} CORD_pos[1];
/* Extract the cord from a position: */
CORD CORD_pos_to_cord(CORD_pos p);
/* Extract the current index from a position: */
size_t CORD_pos_to_index(CORD_pos p);
/* Fetch the character located at the given position: */
char CORD_pos_fetch(CORD_pos p);
/* Initialize the position to refer to the given cord and index. */
/* Note that this is the most expensive function on positions: */
void CORD_set_pos(CORD_pos p, CORD x, size_t i);
/* Advance the position to the next character. */
/* P must be initialized and valid. */
/* Invalidates p if past end: */
void CORD_next(CORD_pos p);
/* Move the position to the preceding character. */
/* P must be initialized and valid. */
/* Invalidates p if past beginning: */
void CORD_prev(CORD_pos p);
/* Is the position valid, i.e. inside the cord? */
int CORD_pos_valid(CORD_pos p);
char CORD__pos_fetch(CORD_pos);
void CORD__next(CORD_pos);
void CORD__prev(CORD_pos);
#define CORD_pos_fetch(p) \
(((p)[0].cur_end != 0)? \
(p)[0].cur_leaf[(p)[0].cur_pos - (p)[0].cur_start] \
: CORD__pos_fetch(p))
#define CORD_next(p) \
(((p)[0].cur_pos + 1 < (p)[0].cur_end)? \
(p)[0].cur_pos++ \
: (CORD__next(p), 0))
#define CORD_prev(p) \
(((p)[0].cur_end != 0 && (p)[0].cur_pos > (p)[0].cur_start)? \
(p)[0].cur_pos-- \
: (CORD__prev(p), 0))
#define CORD_pos_to_index(p) ((p)[0].cur_pos)
#define CORD_pos_to_cord(p) ((p)[0].path[0].pe_cord)
#define CORD_pos_valid(p) ((p)[0].path_len != CORD_POS_INVALID)
/* Some grubby stuff for performance-critical friends: */
#define CORD_pos_chars_left(p) ((long)((p)[0].cur_end) - (long)((p)[0].cur_pos))
/* Number of characters in cache. <= 0 ==> none */
#define CORD_pos_advance(p,n) ((p)[0].cur_pos += (n) - 1, CORD_next(p))
/* Advance position by n characters */
/* 0 < n < CORD_pos_chars_left(p) */
#define CORD_pos_cur_char_addr(p) \
(p)[0].cur_leaf + ((p)[0].cur_pos - (p)[0].cur_start)
/* address of current character in cache. */
#endif
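A small sketch of the position API in use; CORD comes from cord.h, and the function name is illustrative.

# include <stddef.h>
# include "cord.h"

/* Count occurrences of a character by walking the cord once. */
size_t example_count(CORD x, char target)
{
    CORD_pos p;
    size_t n = 0;

    CORD_set_pos(p, x, 0);            /* the expensive step; done only once */
    while (CORD_pos_valid(p)) {
        if (CORD_pos_fetch(p) == target) n++;
        CORD_next(p);                 /* usually just bumps the cached pos  */
    }
    return n;
}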

View File

@@ -0,0 +1,135 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Boehm, July 11, 1995 11:54 am PDT */
# ifndef GC_HEADERS_H
# define GC_HEADERS_H
typedef struct hblkhdr hdr;
# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
--> Get a real machine.
# endif
/*
* The 2 level tree data structure that is used to find block headers.
* If there are more than 32 bits in a pointer, the top level is a hash
* table.
*/
# if CPP_WORDSZ > 32
# define HASH_TL
# endif
/* Define appropriate out-degrees for each of the two tree levels */
# ifdef SMALL_CONFIG
# define LOG_BOTTOM_SZ 11
/* Keep top index size reasonable with smaller blocks. */
# else
# define LOG_BOTTOM_SZ 10
# endif
# ifndef HASH_TL
# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
# else
# define LOG_TOP_SZ 11
# endif
# define TOP_SZ (1 << LOG_TOP_SZ)
# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
* The bottom level index contains one of three kinds of values:
* 0 means we're not responsible for this block,
* or this is a block other than the first one in a free block.
* 1 < (long)X <= MAX_JUMP means the block starts at least
* X * HBLKSIZE bytes before the current address.
* A valid pointer points to a hdr structure. (The above can't be
* valid pointers due to the GET_MEM return convention.)
*/
struct bi * asc_link; /* All indices are linked in */
/* ascending order... */
struct bi * desc_link; /* ... and in descending order. */
word key; /* high order address bits. */
# ifdef HASH_TL
struct bi * hash_link; /* Hash chain link. */
# endif
} bottom_index;
/* extern bottom_index GC_all_nils; - really part of GC_arrays */
/* extern bottom_index * GC_top_index []; - really part of GC_arrays */
/* Each entry points to a bottom_index. */
/* On a 32 bit machine, it points to */
/* the index for a set of high order */
/* bits equal to the index. For longer */
/* addresses, we hash the high order */
/* bits to compute the index in */
/* GC_top_index, and each entry points */
/* to a hash chain. */
/* The last entry in each chain is */
/* GC_all_nils. */
# define MAX_JUMP (HBLKSIZE - 1)
# define HDR_FROM_BI(bi, p) \
((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
# ifndef HASH_TL
# define BI(p) (GC_top_index \
[(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
# ifdef SMALL_CONFIG
# define HDR(p) GC_find_header((ptr_t)(p))
# else
# define HDR(p) HDR_INNER(p)
# endif
# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
# define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr)
# define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p))
# else /* hash */
/* Hash function for tree top level */
# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
/* Set bottom_indx to point to the bottom index for address p */
# define GET_BI(p, bottom_indx) \
{ \
register word hi = \
(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
\
while (_bi -> key != hi && _bi != GC_all_nils) \
_bi = _bi -> hash_link; \
(bottom_indx) = _bi; \
}
# define GET_HDR_ADDR(p, ha) \
{ \
register bottom_index * bi; \
\
GET_BI(p, bi); \
(ha) = &(HDR_FROM_BI(bi, p)); \
}
# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
(hhdr) = *_ha; }
# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
*_ha = (hhdr); }
# define HDR(p) GC_find_header((ptr_t)(p))
# endif
/* Is the result a forwarding address to someplace closer to the */
/* beginning of the block or NIL? */
# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((unsigned long) (hhdr) <= MAX_JUMP)
/* Get an HBLKSIZE aligned address closer to the beginning of the block */
/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (unsigned long)(hhdr))
# endif /* GC_HEADERS_H */
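For concreteness, the non-hashed (32-bit) lookup path above decomposes an address as in the following sketch; LOG_HBLKSIZE == 12 (4K heap blocks) is an assumed typical value, and the real constants live in gc_priv.h.

/* Sketch of the address split used by BI() and HDR_FROM_BI() above. */
#define EX_LOG_HBLKSIZE  12
#define EX_LOG_BOTTOM_SZ 10
#define EX_LOG_TOP_SZ    (32 - EX_LOG_BOTTOM_SZ - EX_LOG_HBLKSIZE)   /* 10 */

unsigned example_top_index(unsigned long addr)
{   /* selects one of 2^EX_LOG_TOP_SZ bottom_index blocks */
    return (unsigned)(addr >> (EX_LOG_BOTTOM_SZ + EX_LOG_HBLKSIZE));
}

unsigned example_bottom_index(unsigned long addr)
{   /* selects one of 2^EX_LOG_BOTTOM_SZ hdr pointers within that block */
    return (unsigned)((addr >> EX_LOG_HBLKSIZE) & ((1 << EX_LOG_BOTTOM_SZ) - 1));
}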

1748
gc/include/private/gc_priv.h Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

221
gc/include/weakpointer.h Normal file
View File

@@ -0,0 +1,221 @@
#ifndef _weakpointer_h_
#define _weakpointer_h_
/****************************************************************************
WeakPointer and CleanUp
Copyright (c) 1991 by Xerox Corporation. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
Permission is hereby granted to copy this code for any purpose,
provided the above notices are retained on all copies.
Last modified on Mon Jul 17 18:16:01 PDT 1995 by ellis
****************************************************************************/
/****************************************************************************
WeakPointer
A weak pointer is a pointer to a heap-allocated object that doesn't
prevent the object from being garbage collected. Weak pointers can be
used to track which objects haven't yet been reclaimed by the
collector. A weak pointer is deactivated when the collector discovers
its referent object is unreachable by normal pointers (reachability
and deactivation are defined more precisely below). A deactivated weak
pointer remains deactivated forever.
****************************************************************************/
template< class T > class WeakPointer {
public:
WeakPointer( T* t = 0 )
/* Constructs a weak pointer for *t. t may be null. It is an error
if t is non-null and *t is not a collected object. */
{impl = _WeakPointer_New( t );}
T* Pointer()
/* wp.Pointer() returns a pointer to the referent object of wp or
null if wp has been deactivated (because its referent object
has been discovered unreachable by the collector). */
{return (T*) _WeakPointer_Pointer( this->impl );}
int operator==( WeakPointer< T > wp2 )
/* Given weak pointers wp1 and wp2, if wp1 == wp2, then wp1 and
wp2 refer to the same object. If wp1 != wp2, then either wp1
and wp2 don't refer to the same object, or if they do, one or
both of them has been deactivated. (Note: If objects t1 and t2
are never made reachable by their clean-up functions, then
      WeakPointer<T>(t1) == WeakPointer<T>(t2) if and only if t1 == t2.) */
{return _WeakPointer_Equal( this->impl, wp2.impl );}
int Hash()
/* Returns a hash code suitable for use by multiplicative- and
division-based hash tables. If wp1 == wp2, then wp1.Hash() ==
wp2.Hash(). */
{return _WeakPointer_Hash( this->impl );}
private:
void* impl;
};
/*****************************************************************************
CleanUp
A garbage-collected object can have an associated clean-up function
that will be invoked some time after the collector discovers the
object is unreachable via normal pointers. Clean-up functions can be
used to release resources such as open-file handles or window handles
when their containing objects become unreachable. If a C++ object has
a non-empty explicit destructor (i.e. it contains programmer-written
code), the destructor will be automatically registered as the object's
initial clean-up function.
There is no guarantee that the collector will detect every unreachable
object (though it will find almost all of them). Clients should not
rely on clean-up to cause some action to occur immediately -- clean-up
is only a mechanism for improving resource usage.
Every object with a clean-up function also has a clean-up queue. When
the collector finds the object is unreachable, it enqueues it on its
queue. The clean-up function is applied when the object is removed
from the queue. By default, objects are enqueued on the garbage
collector's queue, and the collector removes all objects from its
queue after each collection. If a client supplies another queue for
objects, it is his responsibility to remove objects (and cause their
functions to be called) by polling it periodically.
Clean-up queues allow clean-up functions accessing global data to
synchronize with the main program. Garbage collection can occur at any
time, and clean-ups invoked by the collector might access data in an
inconsistent state. A client can control this by defining an explicit
queue for objects and polling it at safe points.
The following definitions are used by the specification below:
Given a pointer t to a collected object, the base object BO(t) is the
value returned by new when it created the object. (Because of multiple
inheritance, t and BO(t) may not be the same address.)
A weak pointer wp references an object *t if BO(wp.Pointer()) ==
BO(t).
***************************************************************************/
template< class T, class Data > class CleanUp {
public:
static void Set( T* t, void c( Data* d, T* t ), Data* d = 0 )
/* Sets the clean-up function of object BO(t) to be <c, d>,
replacing any previously defined clean-up function for BO(t); c
and d can be null, but t cannot. Sets the clean-up queue for
BO(t) to be the collector's queue. When t is removed from its
clean-up queue, its clean-up will be applied by calling c(d,
t). It is an error if *t is not a collected object. */
{_CleanUp_Set( t, c, d );}
static void Call( T* t )
/* Sets the new clean-up function for BO(t) to be null and, if the
old one is non-null, calls it immediately, even if BO(t) is
still reachable. Deactivates any weak pointers to BO(t). */
{_CleanUp_Call( t );}
class Queue {public:
Queue()
/* Constructs a new queue. */
{this->head = _CleanUp_Queue_NewHead();}
void Set( T* t )
/* q.Set(t) sets the clean-up queue of BO(t) to be q. */
{_CleanUp_Queue_Set( this->head, t );}
int Call()
/* If q is non-empty, q.Call() removes the first object and
calls its clean-up function; does nothing if q is
empty. Returns true if there are more objects in the
queue. */
{return _CleanUp_Queue_Call( this->head );}
private:
void* head;
};
};
/**********************************************************************
Reachability and Clean-up
An object O is reachable if it can be reached via a non-empty path of
normal pointers from the registers, stacks, global variables, or an
object with a non-null clean-up function (including O itself),
ignoring pointers from an object to itself.
This definition of reachability ensures that if object B is accessible
from object A (and not vice versa) and if both A and B have clean-up
functions, then A will always be cleaned up before B. Note that as
long as an object with a clean-up function is contained in a cycle of
pointers, it will always be reachable and will never be cleaned up or
collected.
When the collector finds an unreachable object with a null clean-up
function, it atomically deactivates all weak pointers referencing the
object and recycles its storage. If object B is accessible from object
A via a path of normal pointers, A will be discovered unreachable no
later than B, and a weak pointer to A will be deactivated no later
than a weak pointer to B.
When the collector finds an unreachable object with a non-null
clean-up function, the collector atomically deactivates all weak
pointers referencing the object, redefines its clean-up function to be
null, and enqueues it on its clean-up queue. The object then becomes
reachable again and remains reachable at least until its clean-up
function executes.
The clean-up function is assured that its argument is the only
accessible pointer to the object. Nothing prevents the function from
redefining the object's clean-up function or making the object
reachable again (for example, by storing the pointer in a global
variable).
If the clean-up function does not make its object reachable again and
does not redefine its clean-up function, then the object will be
collected by a subsequent collection (because the object remains
unreachable and now has a null clean-up function). If the clean-up
function does make its object reachable again and a clean-up function
is subsequently redefined for the object, then the new clean-up
function will be invoked the next time the collector finds the object
unreachable.
Note that a destructor for a collected object cannot safely redefine a
clean-up function for its object, since after the destructor executes,
the object has been destroyed into "raw memory". (In most
implementations, destroying an object mutates its vtbl.)
Finally, note that calling delete t on a collected object first
deactivates any weak pointers to t and then invokes its clean-up
function (destructor).
**********************************************************************/
extern "C" {
void* _WeakPointer_New( void* t );
void* _WeakPointer_Pointer( void* wp );
int _WeakPointer_Equal( void* wp1, void* wp2 );
int _WeakPointer_Hash( void* wp );
void _CleanUp_Set( void* t, void (*c)( void* d, void* t ), void* d );
void _CleanUp_Call( void* t );
void* _CleanUp_Queue_NewHead ();
void _CleanUp_Queue_Set( void* h, void* t );
int _CleanUp_Queue_Call( void* h );
}
#endif /* _weakpointer_h_ */
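A short sketch combining the two facilities above; Entry, close_entry, and example are illustrative names, and e must point to a collected object.

#include <stdio.h>
#include "weakpointer.h"

struct Entry { int handle; };            // hypothetical collected object type

static void close_entry( int* d, Entry* e )
    { printf( "releasing handle %d\n", e->handle ); }

void example( Entry* e )
{
    CleanUp< Entry, int >::Set( e, close_entry ); // run close_entry when e dies
    WeakPointer< Entry > wp( e );                 // does not keep e reachable
    /* ... some collections later ... */
    if (wp.Pointer() != 0) {
        /* e has not yet been found unreachable; still safe to use */
    }
}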