On 3/14/06, Chris Lattner <sabre at nondot.org> wrote:
> > How exactly does this indicate X is no longer live? Is this internal
> > code-generator logic/magic?
>
> No, this just prevents the GC from accidentally thinking that *X is live
> through that pointer. The collector cannot distinguish between pointers
> that are out of scope from those that aren't, so this lets it consider
> all pointers to be in scope, without causing any "dead" pointers to mark
> objects.
Does this mean some pointers from the roots might be null? I figured
only in-scope variables/roots would be in the llvm_gc_root_chain. Does
this list ever shorten?
> I would suggest doing experiments to determine which has the higher
> overhead. Refcounting as a whole is expensive, so you need to find a
> design that fits your constraints/application.
I don't actually have an application at the moment; I'm just learning
about garbage collection. Reference counting seemed the most
straightforward at the time, and LLVM provided an environment free of
any superfluous distractions in which to learn and experiment with the
raw algorithms.
I've attached the lazy ref-counting collector if anyone's interested.
There are a few suggested enhancements in the source for those who
might want to extend it, such as bounds for real-time constraints, or
adding generations to reduce the overhead of filtering out the live
roots. :-)
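A minimal sketch of how a client drives it through the GCInterface entry
points (the sizes and names here are just for illustration; note that the
allocator doesn't zero memory, so fields need to be nulled before the first
barriered store):

    #include <string.h>
    #include "GCInterface.h"

    int main() {
      void **obj;
      void *child;
      llvm_gc_initialize(0);               /* the size hint is ignored here */
      obj = llvm_gc_allocate(2 * sizeof(void *));
      child = llvm_gc_allocate(16);
      memset(obj, 0, 2 * sizeof(void *));  /* allocate doesn't zero memory */
      llvm_gc_write(child, obj, &obj[0]);  /* store: child's refcount -> 1 */
      llvm_gc_write(NULL, obj, &obj[0]);   /* drop: refcount -> 0, onto 'scan' */
      llvm_gc_collect();                   /* no root references child: freed */
      return 0;
    }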
Sandro
-------------- next part --------------
/*===-- refcount.c - Simple refcounting garbage collector ---------===*\
|*
|* The LLVM Compiler Infrastructure
|*
|* This file was developed by the LLVM research group and is distributed under
|* the University of Illinois Open Source License. See LICENSE.TXT for details.
|*
|*===----------------------------------------------------------------------===*|
|*
|* This is a simple lazy reference counting collector. Two lists track
|* references which are potentially garbage. The 'scan' list accumulates
|* references which are thought to be garbage because the refcount is zero. The
|* 'deferred' list accumulates references which are known to be referenced
|* from roots.
|*
|* During collection, the walk_roots callback filters the 'scan' list and
|* moves entries to 'deferred'. At the end of a collection cycle, 'deferred'
|* and 'scan' (which is now empty) are swapped.
|*
|* Reference counts are maintained via a write barrier. A collection is run on
|* every allocation if 'scan' is not empty.
|*
\*===----------------------------------------------------------------------===*/
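/* A worked example of the lazy scheme (illustration only):
 *
 *   cycle N:   a barriered store drops X's refcount to 0, so X is linked
 *              into 'scan' with POINTER_FLAG set.
 *   collect:   llvm_cg_walk_gcroots(defer) finds a root still pointing at X
 *              and moves it to 'deferred'; everything left on 'scan' is freed.
 *   cycle N+1: the lists are swapped; if no root points at X this time,
 *              X stays on 'scan' and is freed.
 */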
#include "../GCInterface.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/* Possible Enhancements:
 * 1. A full collection of the list is the default, and an iteration bound is
 *    possible (for real-time constraints?), but this involves moving elements
 *    between 'deferred' and 'scan' rather than the currently simple swap
 *    operation.
 *
 * 2. Generational ref-counting: the 'flags' field has many unused bits which
 *    can be used to track how many collections an object has survived. Older
 *    objects can be collected less often (or placed in different lists?),
 *    which should reduce the collection overhead (particularly the filtering).
 */
/* Each allocated block of space carries this header, which tracks the
 * refcount; when the refcount reaches 0, the block is linked into a list of
 * scheduled deletions.
 */
typedef struct Header {
  unsigned flags;        /* book-keeping info */
  union {
    unsigned count;      /* the reference count, while it is nonzero */
    struct Header *next; /* the list link once the count hits zero */
  } field;
} BlockHeader;
/* flag indicates the union contains a 'next' pointer rather than a count */
#define POINTER_FLAG 0x01
#define IS_POINTER(header) (header->flags & POINTER_FLAG)
#define SET_POINTER(header) header->flags |= POINTER_FLAG;
#define CLEAR_POINTER(header) header->flags &= ~POINTER_FLAG;
/* push 'block' on the front of list 'root' */
#define LINK(root, block) \
  block->field.next = root;\
  root = block;
/* unlink 'entry' from the list, given its predecessor 'prev' */
#define UNLINK(entry, prev) \
  prev->field.next = entry->field.next;
/* find the predecessor of 'entry' in the list headed by 'head'; leaves
 * i == NULL if 'entry' has no predecessor (it is the head, or absent) */
#define FIND(head, entry, i) \
  for (i=head; i != NULL && i->field.next != entry; i=i->field.next) {}
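/* Example (illustrative only): unlinking a block 'h' that is not the head
 * of 'scan':
 *
 *   BlockHeader *prev;
 *   FIND(scan, h, prev);                  -- prev->field.next == h on success
 *   if (prev != NULL) { UNLINK(h, prev); }
 */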
/* list of scheduled deletions (refcounts == 0) */
static BlockHeader *scan;
/* items whose scheduled deletion was invalidated during the current
 * collection (a root still references them), and which must therefore wait
 * for the next cycle to be collected.
 */
static BlockHeader *deferred;
void defer(void **Root, void *Meta) __attribute__((always_inline));
void reset() __attribute__((always_inline));
void refcnt_inc(void *obj) __attribute__((always_inline));
void refcnt_dec(void *obj) __attribute__((always_inline));
/* if the object pointed to by the Root is scheduled for collection,
* defer it to the next collection cycle (this will continue until
* root falls out of scope).
*/
void defer(void **Root, void *Meta) {
  BlockHeader *head;
  if (*Root == NULL || scan == NULL)
    return; /* null root, or nothing left scheduled: nothing to defer */
  /* roots hold object pointers, but 'scan' links block headers */
  head = (BlockHeader *)((char *)*Root - sizeof(BlockHeader));
  if (scan == head) {
    scan = scan->field.next;
    LINK(deferred, head);
  } else {
    BlockHeader *i, *j;
    for (i = scan->field.next, j = scan; i != NULL; j = i, i = i->field.next) {
      if (i == head) { /* can't be the head of the list */
        UNLINK(i, j);
        LINK(deferred, i);
        return;
      }
    }
  }
}
/* Swaps the two lists and starts over; the references scheduled to be
 * collected last cycle, but which were deferred, are now first on the
 * list. They may be deferred again if their roots are still live.
 */
void reset() {
  if (scan != NULL) {
    /* if the scan list still contains elements, find the last one and append
     * 'deferred' to it */
    BlockHeader *tail;
    for (tail = scan; tail->field.next != NULL; tail = tail->field.next)
      {}
    tail->field.next = deferred;
  } else {
    scan = deferred;
  }
  deferred = NULL;
}
/* convert between a block's header and the object that follows it */
#define header_get(obj) (BlockHeader *)(obj - sizeof(BlockHeader))
#define obj_get(header) (void *)((void *)header + sizeof(BlockHeader))
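/* Block layout of each allocation:
 *
 *   +-------------+--------------------+
 *   | BlockHeader | object data (Size) |
 *   +-------------+--------------------+
 *    ^header       ^obj
 *
 * header == header_get(obj), obj == obj_get(header), and obj is the pointer
 * llvm_gc_allocate returns.
 */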
/* increment the refcount in the header */
void refcnt_inc(void *obj) {
  if (obj != NULL) {
    BlockHeader *header = header_get(obj);
    if (IS_POINTER(header)) {
      /* the block was scheduled for deletion; pull it back off 'scan' */
      if (scan == header) {
        scan = scan->field.next;
      } else {
        BlockHeader *prev;
        FIND(scan, header, prev); /* find header's predecessor in the list */
        if (prev != NULL) {
          UNLINK(header, prev);
        }
      }
      CLEAR_POINTER(header);
      header->field.count = 1;
    } else {
      ++header->field.count;
    }
  }
}
/* decrement the refcount in the header */
void refcnt_dec(void *obj) {
  if (obj != NULL) {
    BlockHeader *header = header_get(obj);
    --header->field.count;
    if (header->field.count == 0) {
      LINK(scan, header); /* refcount hit 0: schedule obj to be collected */
      SET_POINTER(header);
    }
  }
}
/* llvm_gc_initialize - do nothing
*/
void llvm_gc_initialize(unsigned InitialHeapSize) {
}
/* We always want to inline the fast path, but never want to inline the
* slow path.
*/
void *llvm_gc_allocate(unsigned Size) __attribute__((always_inline));
static void* llvm_gc_alloc_slow(unsigned Size) __attribute__((noinline));
void *llvm_gc_allocate(unsigned Size) {
  unsigned Actual = Size + sizeof(BlockHeader);
  BlockHeader *space;
  if (scan != NULL) {
    llvm_gc_collect(); /* run a collection before every allocation */
  }
  space = (BlockHeader *)malloc(Actual);
  if (space == NULL) {
    space = llvm_gc_alloc_slow(Actual);
  }
  space->flags = 0; /* no flags set yet: the union holds a count */
  space->field.count = 0;
  /* advance the pointer past the block header; note that arithmetic on a
   * BlockHeader* would step in multiples of sizeof(BlockHeader), so the
   * byte-wise obj_get macro is used instead */
  return obj_get(space);
}
static void *llvm_gc_alloc_slow(unsigned Actual) {
  void *space;
  llvm_gc_collect();
  space = malloc(Actual); /* what happens if it fails again? */
  assert(space != NULL);  /* abort if still can't allocate after collection */
  return space;
}
void llvm_gc_collect() {
  /*printf("Garbage collecting!!\n");*/
  if (scan != NULL) {
    BlockHeader *head, *next;
    llvm_cg_walk_gcroots(defer);
    head = scan;
    scan = NULL;
    reset(); /* 'scan' now contains the list of deferred refs */
    for (; head != NULL; head = next) {
      next = head->field.next; /* read the link before freeing the block */
      free(head);
      /* FIXME: this should also decrement the ref count of all pointers within
       * head, which places them in the 'scan' list.
       */
    }
  }
}
/* We use no read barrier */
void *llvm_gc_read(void *ObjPtr, void **FieldPtr) { return *FieldPtr; }
/* write barrier to increment and decrement refcounts */
void llvm_gc_write(void *V, void *ObjPtr, void **FieldPtr) {
  if (*FieldPtr != V) { /* skip all this work if simply resetting the field */
    refcnt_dec(*FieldPtr); /* drop the old referent */
    *FieldPtr = V;
    refcnt_inc(V);
  }
}
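/* Example of the barrier protocol (illustrative; 'n' and 'left' are
 * hypothetical): a mutator never stores a heap pointer directly, so instead
 * of
 *     n->left = child;
 * it performs
 *     llvm_gc_write(child, n, (void **)&n->left);
 * which decrements the old value of n->left and increments child.
 */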
/*===----------------------------------------------------------------------===**
* FIXME: This should be in a code-generator specific library, but for now this
* will work for all code generators.
*/
typedef struct GCRoot {
  void **RootPtr;
  void *Meta;
} GCRoot;
typedef struct GCRoots {
  struct GCRoots *Next;
  unsigned NumRoots;
  GCRoot RootRecords[];
} GCRoots;
GCRoots *llvm_gc_root_chain;
void llvm_cg_walk_gcroots(void (*FP)(void **Root, void *Meta)) {
  GCRoots *R = llvm_gc_root_chain;
  for (; R; R = R->Next) {
    unsigned i, e;
    for (i = 0, e = R->NumRoots; i != e; ++i)
      FP(R->RootRecords[i].RootPtr, R->RootRecords[i].Meta);
  }
}
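/* For reference, a sketch of what compiled code conceptually does with this
 * chain (hypothetical pseudo-C; the code generator emits the real thing).
 * A function with two roots pushes a record on entry and pops it on every
 * exit; the pop is what shortens llvm_gc_root_chain:
 *
 *   GCRoots *frame = alloca(sizeof(GCRoots) + 2 * sizeof(GCRoot));
 *   frame->Next = llvm_gc_root_chain;
 *   frame->NumRoots = 2;
 *   frame->RootRecords[0].RootPtr = &a;  frame->RootRecords[0].Meta = NULL;
 *   frame->RootRecords[1].RootPtr = &b;  frame->RootRecords[1].Meta = NULL;
 *   llvm_gc_root_chain = frame;
 *   ... function body ...
 *   llvm_gc_root_chain = frame->Next;    <-- pop: the chain shortens here
 */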
/* END FIXME! */