/*
 * Copyright (C) 2008,2009,2010,2011  Paulo Cesar Pereira de Andrade.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors:
 *	Paulo Cesar Pereira de Andrade
 */

#include "exl.h"
#if JITVM
#  include "ejit.h"
#endif

/*
 * if USE_SEMAPHORE is set to zero, it will use the method described in:
 *	David R. Butenhof. Programming with POSIX Threads. Addison-Wesley. ISBN 0-201-63392-2.
 * otherwise, it will use another custom method that should not spin
 * and wait reading a volatile variable, but unfortunately, it appears
 * to not be always reliable, and/or is dependent on Linux kernel version.
 *
 * note that USE_SEMAPHORE is desirable if debugging with helgrind as
 * helgrind will understand the semaphore semantics and not fill the
 * default 1000+ warnings and tell you to go fix your program :-)
 *
 * maybe USE_SEMAPHORE just needs some way to "flush state", possibly just
 * reading once the value of a volatile variable?
 * (I want to believe there is no way for a thread to somehow "escape" from
 * pthread_mutex_lock() due to the sem_wait() and sem_post()'s in the
 * signal handler...)
 * but there is something weird going on, as it requires 3 semaphores, or
 * will sometimes deadlock (probably when it is waiting for gc_mutex)
 */
#define USE_SEMAPHORE		0

#include <stdlib.h>
#include <signal.h>
#if USE_SEMAPHORE
#include <semaphore.h>
#endif

/*
 * Defines
 */
/* topmost bit reserved */
#define TY_MASK			0x0fffffff
#define RE_MASK			0x10000000
#define VE_MASK			0x20000000
#define CT_MASK			0x40000000
#define NO_MASK			0x80000000	/* reserved/unused bit */

/* Use next field to mark&sweep bit.
 * Note that this has a very fragile logic depending on how the
 * next and prev fields are accessed, but has the great benefit of
 * only having it set on code called from gc()
 */
#if !GC_DEBUG
#  define mark(memory)							\
    (memory)->next = (ememory_t *)((long)(memory)->next | 1)
#endif
#define clear(memory)							\
    (memory)->next = (ememory_t *)((long)(memory)->next & ~1)
#define mark_p(memory)							\
    ((long)(memory)->next & 1)

#define type_p(memory)		((memory)->header & TY_MASK)
#define vector_p(memory)	((memory)->header & VE_MASK)
#define pointer_p(memory)	((memory)->header & RE_MASK)

#define null_memory		((ememory_t *)-sizeof(ememory_t))

#define memory_to_object(type, memory)					\
    (type)((eint8_t *)(memory) + sizeof(ememory_t))
#define object_to_memory(object)					\
    (ememory_t *)((eint8_t *)(object) - sizeof(ememory_t))
#define memory_to_field(type, field, memory)				\
    (((type)((eint8_t *)(memory) + sizeof(ememory_t)))->field)
#define memory_to_field_to_memory(type, field, memory)			\
    object_to_memory(memory_to_field(type, field, memory))

#define gc_lock()		emutex_lock(&gc_mutex)
#define gc_unlock()		emutex_unlock(&gc_mutex)

/*
 * Types
 */
typedef struct ememory	ememory_t;

/* Header prepended to every gc-managed allocation; the user object
 * starts sizeof(ememory_t) bytes past it (see memory_to_object).
 * The low bit of "next" doubles as the mark&sweep flag, so "next"
 * must always be read through the mark/clear/mark_p macros. */
struct ememory {
#if __WORDSIZE == 64
    eobject_t	 align;		/* padding to keep the user data 16-byte aligned */
#endif
    ememory_t	*prev;		/* doubly linked list of all allocations */
    ememory_t	*next;		/* low bit is the gc mark flag */
    eint32_t	 length;	/* user-requested length in bytes */
    eint32_t	 header;	/* type tag (TY_MASK) plus RE/VE/CT flag bits */
};

/*
 * Initialization
 */
ethread_t		  *tmain;
void			(**ebs)(eint32_t);
eint32_t		   ebs_idx, ebs_len;
eobject_t		  *ecs;
eint32_t		   ecs_idx, ecs_len;
eobject_t		  *eds;
eint32_t		   eds_idx, eds_len;
ertti_t			  *ets;
eint32_t		   ets_idx, ets_len;
eobject_t		  *els;
eint32_t		   els_idx, els_len;

erecord_t		 **record_vector;

#if HAVE_THREAD
pthread_mutex_t		   ethread_mutex;
#endif

#if GC_DEBUG
eint32_t		   gc_debug;
#endif

eint32_t		   gc_disable;

static ememory_t 	  *root;
static eint32_t	 	   gc_bytes;
static eobject_t 	   root_list;
#if HAVE_THREAD
static pthread_mutex_t	   gc_mutex;
#if USE_SEMAPHORE
/* need to use multiple semaphores or it may dead lock */
static sem_t		   enter_sem;
static sem_t		   sleep_sem;
static sem_t		   leave_sem;
#else
static volatile eint32_t   sr_int;
static sigset_t		   sr_set;
#endif
#endif

#if DEBUG
static volatile ebool_t	   gc_check;
int			   vm_running;
#endif

/*
 * Prototypes
 */
#if HAVE_THREAD
static void
suspend_handler(int unused);
#endif

static void
sigfpe_handler(int unused);

#if HAVE_THREAD
#if !USE_SEMAPHORE
static void
resume_handler(int unused);
#endif
#endif

static eobject_t
copy_tree(eobject_t list);

#if GC_DEBUG
static void
mark(ememory_t *memory);
#endif

static void
gc(void);

static void
gc_mark_hash(ehash_t *hash);

static void
gc_mark_entry(eentry_t *entry);

static void
gc_mark_cons(eobject_t object);

static void
gc_mark_function(efunction_t *function);

static void
gc_mark_ast(east_list_t *ast);

static void
gc_mark_label(elabel_t *label);

#if JITVM
static void
gc_mark_jit_state(jit_state_t *state);
#endif

static void
gc_mark(ememory_t *memory);

static void
gc_mark_mpcache(eobject_t *entries, eobject_t indexes, eint32_t length);

static void
gc_mark_thread(ethread_t *thread);

static void
gc_mark_roots(void);

/*
 * Implementation
 */
/* One-time bootstrap of the object system: installs the gc thread
 * suspend/resume signal machinery, builds the main thread object and
 * its value/gc stacks, and allocates the global builtin, constant,
 * data, type and label tables.  Must run before any other allocation. */
void
init_object(void)
{
#if HAVE_THREAD
    sigset_t		set;
    struct sigaction	handler;

    emutex_init(&ethread_mutex);
    emutex_init(&gc_mutex);

#if USE_SEMAPHORE
    /* three semaphores for the stop-the-world handshake; see the
     * comment above USE_SEMAPHORE about why one is not enough */
    if (sem_init(&enter_sem, 0, 0) || sem_init(&sleep_sem, 0, 0) ||
	sem_init(&leave_sem, 0, 0))
	eerror("sem_init: %s", strerror(errno));
#endif

    /* FIXME this should be a noop */
    /* make sure the suspend (and resume) signals are not blocked */
    pthread_sigmask(SIG_SETMASK, null, &set);
    sigdelset(&set, SUSPEND_SIGNAL);
#if !USE_SEMAPHORE
    sigdelset(&set, RESUME_SIGNAL);
#endif
    pthread_sigmask(SIG_SETMASK, &set, null);

#if !USE_SEMAPHORE
    /* mask used inside sigsuspend(): block everything but RESUME_SIGNAL */
    sigfillset(&sr_set);
    sigdelset(&sr_set, RESUME_SIGNAL);
#endif

    handler.sa_flags = 0;
    sigemptyset(&handler.sa_mask);
    handler.sa_handler = suspend_handler;
    if (sigaction(SUSPEND_SIGNAL, &handler, null))
	eerror("sigaction: %s", strerror(errno));

#if !USE_SEMAPHORE
    handler.sa_flags = 0;
    sigemptyset(&handler.sa_mask);
    handler.sa_handler = resume_handler;
    if (sigaction(RESUME_SIGNAL, &handler, null))
	eerror("sigaction: %s", strerror(errno));
#endif
#endif

    /* duplicate of enew_thread() without need to check if tmain exists */
    enew_object((eobject_t *)&tmain, t_thread, sizeof(ethread_t));

#if HAVE_THREAD
    thread_self = tmain;
    /* circular list of threads with a single element */
    tmain->next = tmain;
#endif

    tmain->tryoff = -1;
    /* evaluation (value) stack */
    tmain->len = 65536;
    enew_object((eobject_t *)&tmain->ess, t_resize | t_void,
		tmain->len * sizeof(evalue_t));
    tmain->ebp = tmain->esp = tmain->ess;
    /* gc protection stack (see v_push/v_pop users below) */
    tmain->gsl = 16;
    enew_object((eobject_t *)&tmain->egs, t_resize | t_void,
		tmain->gsl * sizeof(eobject_t));
    tmain->gsi = 0;
    tmain->flt = cfg_float_format;
    tmain->prc = cfg_mpfr_prc;
    tmain->rnd = cfg_mpfr_rnd;
    /* per-thread scratch bignum/rational/float/complex temporaries */
    cqq_init(&tmain->qq);
    mpc_init2(&tmain->c, cfg_mpfr_prc);
    mpq_init(&tmain->q0);
    mpf_init(&tmain->f0);
    mpc_init2(&tmain->c0, cfg_mpfr_prc);
    mpq_init(&tmain->q1);
    mpq_init(&tmain->q2);
    mpq_init(&tmain->q3);
    empcache_setup();

    evector(&tmain->vec, t_uint8, BUFSIZ, 1);

#if HAVE_THREAD
    /* make tests simpler before executing bytecode */
    tmain->pthread = pthread_self();
#endif

    /* builtin table */
    ebs_len = 4096;
    enew_object((eobject_t *)&ebs, t_resize | t_void,
		ebs_len * sizeof(eobject_t));
    ebs_idx = 0;

    /* constant table */
    ecs_len = 4096;
    enew_object((eobject_t *)&ecs, t_resize | t_void,
		ecs_len * sizeof(eobject_t));
    ecs_idx = 0;

    /* data (global value) table */
    eds_len = 4096;
    enew_object((eobject_t *)&eds, t_resize | t_void,
		eds_len * sizeof(eobject_t));
    eds_idx = 0;

    /* run time type information table and parallel record vector */
    ets_len = 4096;
    enew_object((eobject_t *)&ets, t_resize | t_void,
		ets_len * sizeof(ertti_t));
    ets_idx = 0;
    enew_object((eobject_t *)&record_vector, t_resize | t_void,
		ets_len * sizeof(eobject_t));

    /* label table */
    els_len = 4096;
    enew_object((eobject_t *)&els, t_resize | t_void,
		els_len * sizeof(eobject_t));
    els_idx = 0;

    /* generic parser signal handler */
    signal(SIGFPE, sigfpe_handler);
    if (sigsetjmp(tmain->env, 1))
	ecfg_error(parse, "parser unhandled exception");

    /* exception can be overriden during constant folding */
    memcpy(&cfg_env, &tmain->env, sizeof(sigjmp_buf));
}

/* Register "object" (the address of a managed pointer) so that
 * gc_mark_roots() keeps whatever it points to alive. */
void
eadd_root(eobject_t *object)
{
    /* root_list contents are addresses of managed pointers, and cannot
     * be inspected and/or header bits modified */
    /* gc must stay disabled across the econs() call: the new cell
     * stores a raw address that the collector cannot traverse */
    ++gc_disable;
    root_list = econs(object, root_list);
    --gc_disable;
}

/* Remove "object" from root_list; a no-op if it was never added. */
void
erem_root(eobject_t *object)
{
    eobject_t	*curr;
    eobject_t	*last;

    last = curr = root_list;
    while (curr) {
	if (ecar(curr) == object) {
	    if (curr == last)
		/* removing the head of the list */
		root_list = ecdr(root_list);
	    else
		/* splice the cell out of the list */
		erplacd(last, ecdr(curr));
	    return;
	}
	last = curr;
	curr = ecdr(curr);
    }
}

#if HAVE_THREAD
/* SUSPEND_SIGNAL handler: parks the receiving thread while gc runs.
 * Only async-signal-safe calls (write, _exit, sem_post/sem_wait,
 * sigsuspend) are used here. */
static void
suspend_handler(int unused)
{
#if USE_SEMAPHORE
    /* tell gc we entered the handler ... */
    if (sem_post(&enter_sem)) {
	write(STDERR_FILENO, "sem_post: failed\n", 17);
	_exit(EXIT_FAILURE);
    }
    /* ... sleep until gc is done ... */
    while (sem_wait(&sleep_sem)) {
	/* FIXME is this check safe? can this happen? */
	if (errno != EINTR) {
	    write(STDERR_FILENO, "sem_wait: failed\n", 17);
	    _exit(EXIT_FAILURE);
	}
    }
    /* ... and acknowledge wakeup */
    if (sem_post(&leave_sem)) {
	write(STDERR_FILENO, "sem_post: failed\n", 17);
	_exit(EXIT_FAILURE);
    }
#else
    sr_int = 1;
    /* wait resume signal; sr_set blocks everything but RESUME_SIGNAL */
    sigsuspend(&sr_set);
#endif
}
#endif

/* SIGFPE handler installed for the parser: unwind to the recovery
 * point saved in init_object() (tmain->env). */
static void
sigfpe_handler(int unused)
{
    siglongjmp(tmain->env, 1);
}

#if HAVE_THREAD
#if !USE_SEMAPHORE
/* RESUME_SIGNAL handler: its only job is to interrupt the
 * sigsuspend() in suspend_handler(). */
static void
resume_handler(int unused)
{
    sr_int = 1;
    /* returning from resume_handler() also means returing from sigsuspend() */
}
#endif

/* Allocate and initialize a new thread object with a value stack of
 * at least "length" (minimum 256) entries, then link it into the
 * circular list of threads.  Mirrors the tmain setup in init_object(). */
void
enew_thread(eobject_t *pointer, eint32_t length)
{
    ethread_t	*next;
    ethread_t	*thread;

    /* pointer must be thread_self->obj */
    enew_object(pointer, t_thread, sizeof(ethread_t));
    thread = *(ethread_t **)pointer;

    thread->tryoff = -1;

    if (length < 256)
	length = 256;
    thread->len = length;
    enew_object((eobject_t *)&thread->ess, t_resize | t_void,
		thread->len * sizeof(evalue_t));

    /* helgrind will cause a possible data race warning, when
     * checking if thread_>esp is not null in gc_mark_thread().
     * the warning is harmless as, if it is null, it is not
     * a running thread, and gc() should have triggered when
     * allocating the thread structure, or the thread stack */
    thread->ebp = thread->esp = thread->ess;

    /* gc protection stack */
    thread->gsl = 16;
    enew_object((eobject_t *)&thread->egs, t_resize | t_void,
		thread->gsl * sizeof(eobject_t));
    thread->gsi = 0;
    thread->prc = cfg_mpfr_prc;
    thread->rnd = cfg_mpfr_rnd;
    /* BUGFIX: was "tmain->flt = cfg_float_format;" — a copy-paste from
     * init_object() that left the new thread's float format unset
     * (zero from calloc) and redundantly rewrote the main thread's */
    thread->flt = cfg_float_format;
    /* per-thread scratch bignum/rational/float/complex temporaries */
    cqq_init(&thread->qq);
    mpc_init2(&thread->c, cfg_mpfr_prc);
    mpq_init(&thread->q0);
    mpf_init(&thread->f0);
    mpc_init2(&thread->c0, cfg_mpfr_prc);
    mpq_init(&thread->q1);
    mpq_init(&thread->q2);
    mpq_init(&thread->q3);

    enew_vector(&thread->vec, t_uint8, BUFSIZ, 1);

    /* add thread to "end" of circular list */
    ethreads_lock();
    thread->next = thread_self;
    for (next = thread_self->next; next->next != thread_self; next = next->next)
	;
    next->next = thread;
    ethreads_unlock();
}
#endif

/* Allocate a zero-filled, gc-managed object of "length" bytes with
 * type tag "type", storing the user pointer in *pointer.  May trigger
 * a collection first (roughly every 16MB allocated).  The caller must
 * ensure any unprotected live objects are reachable from a root or
 * the gc protection stack before calling. */
void
enew_object(eobject_t *pointer, etype_t type, eint32_t length)
{
    eint32_t	 bytes;
    ememory_t	*memory;

    /* header plus user data rounded up to a 16 byte multiple */
    bytes = sizeof(ememory_t) + ((length + 15) & ~15);

    /* use only signed memory sized blocks */
    if (bytes < 0)
	/* FIXME throw exception */
	eerror("invalid object length");

    gc_lock();

#if DEBUG
    /* catch reentrant allocation (e.g. from inside gc callbacks) */
    assert(gc_check == false);
    gc_check = true;
#endif

    if (!gc_disable) {
#if GC_DEBUG
	if (gc_debug)
	    gc();
#else
	/* run gc at every roughly 16 megabytes of data allocated */
	if (gc_bytes > (1 << 24)) {
	    gc();
	    gc_bytes = 0;
	}
	gc_bytes += bytes;
#endif
    }

#if HAVE_SIMD
    /* 16-byte alignment required for SIMD loads/stores on user data */
    if (posix_memalign((void **)&memory, 16, bytes))
	eerror("out of memory");
    memset(memory, '\0', bytes);
#else
    if ((memory = (ememory_t *)calloc(1, bytes)) == null)
	eerror("out of memory");
#endif

    /* update memory fields: push onto the head of the allocation list */
    if ((memory->next = root))
	root->prev = memory;
    root = memory;
    memory->length = length;
    memory->header = type;

    *pointer = memory_to_object(eobject_t, memory);

#if DEBUG
    assert(gc_check != false);
    gc_check = false;
#endif

    gc_unlock();
}

/* Resize a t_resize object in place (possibly moving it), preserving
 * contents and zero-filling any newly exposed bytes.  *pointer is
 * updated if the block moves.  Only the rounded-up (16 byte multiple)
 * size matters: a change within the same bucket just updates length. */
void
eresize_object(eobject_t *pointer, eint32_t length)
{
    eint32_t	 bytes;
    ememory_t	*block;
    ememory_t	*memory;

    bytes = sizeof(ememory_t) + ((length + 15) & ~15);

#if DEBUG
    /* cannot be null because need type information */
    if (pointer == null || *pointer == null)
	eerror("invalid null object");
#endif

    /* use only signed memory sized blocks */
    if (bytes < 0)
	eerror("invalid object length");

    gc_lock();

#if DEBUG
    assert(gc_check == false);
    gc_check = true;
#endif

    block = object_to_memory(*pointer);
    if (!(block->header & t_resize))
	eerror("memory is not resizable");
    /* only reallocate when the rounded size bucket actually changes */
    if ((bytes - sizeof(ememory_t)) != ((block->length + 15) & ~15)) {
	if (!gc_disable) {
#if GC_DEBUG
	    if (gc_debug)
		gc();
#else
	    /* run gc at every roughly 16 megabytes of data allocated */
	    if (gc_bytes > (1 << 24)) {
		gc();
		gc_bytes = 0;
	    }
	    gc_bytes += bytes;
#endif
	}

	if ((memory = (ememory_t *)realloc((eobject_t)block, bytes)) == null)
	    eerror("out of memory");
	if (memory != block) {
#if HAVE_SIMD
	    /* FIXME ugly... should always allocate a new block? */
	    /* realloc() does not honor the 16-byte alignment; if the
	     * moved block is misaligned, copy it to an aligned one */
	    if ((eword_t)memory & 15) {
		ememory_t	*unaligned = memory;

		if (posix_memalign((void **)&memory, 16, bytes))
		    eerror("out of memory");
		/* copy only the smaller of old contents and new size */
		if (sizeof(ememory_t) + ((unaligned->length + 15) & ~15) < bytes)
		    memcpy(memory, unaligned, sizeof(ememory_t ) +
			   ((unaligned->length + 15) & ~15));
		else
		    memcpy(memory, unaligned, bytes);
		free(unaligned);
	    }
#endif
	    /* relink neighbors in the allocation list to the new address */
	    if (block == root)
		root = memory;
	    else
		memory->prev->next = memory;
	    if (memory->next)
		memory->next->prev = memory;
	}

	/* return value is now known */
	*pointer = memory_to_object(eobject_t, memory);

	/* update and initialize if length has increased */
	if (length > memory->length)
	    memset((eint8_t *)*pointer + ((memory->length + 15) & ~15), 0,
		   (bytes - sizeof(ememory_t)) - ((memory->length + 15) & ~15));
	memory->length = length;
    }
    else
	block->length = length;

#if DEBUG
    assert(gc_check != false);
    gc_check = false;
#endif

    gc_unlock();
}

/* Return the byte length of a managed object; fixnums are immediates
 * encoded in the pointer, so they report the pointer size. */
eint32_t
eobject_length(eobject_t object)
{
    if (efixnum_p(object))
	return (sizeof(eobject_t));
    if (object == null)
	return (0);
    return ((object_to_memory(object))->length);
}

/* Type of an object when the bytecode vm is not running (asserted);
 * in that case the gc cannot be concurrently mutating headers. */
etype_t
eobject_type_novm(eobject_t object)
{
    assert(vm_running == false);
    if (efixnum_p(object))
	return (t_fixnum);
    if (object == null)
	return (t_void);
    return ((object_to_memory(object))->header);
}

/* Type of an object; null maps to t_void. */
etype_t
eobject_type(eobject_t object)
{
    if (object == null)
	return (t_void);
    /* helgrind dislikes reading the object header while it is marked by gc */
    return ((object_to_memory(object))->header);
}

/* Allocate a new cons cell (car . cdr).  The arguments are pushed on
 * the gc protection stack before enew() so a collection triggered by
 * the allocation cannot reclaim them. */
eobject_t
enew_cons(eobject_t car, eobject_t cdr)
{
    eobject_t	cons;
    v_enter();

    v_check(2);
    v_push(car);
    v_push(cdr);
    enew(&cons, cons);
    erplaca(cons, car);
    erplacd(cons, cdr);
    v_leave();

    return (cons);
}

/* Allocate a new cons cell (car . nil); the cdr stays null from the
 * zero-filled allocation.  car is gc-protected across the allocation. */
eobject_t
enew_cons_nil(eobject_t car)
{
    eobject_t	cons;

    v_check(1);
    v_push(car);
    enew(&cons, cons);
    erplaca(cons, car);
    v_dec();

    return (cons);
}

/* enew_vector() wrapper for use outside bytecode execution (asserted):
 * parks the result on the gc protection stack instead of requiring
 * "pointer" to be a gc-visible location. */
void
enew_vector_novm(evector_t **pointer, etype_t type,
		 eint32_t length, eint32_t rank)
{
    assert(vm_running == false);
    v_check(1);
    v_push(null);
    enew_vector((evector_t **)&v_top(), type, length, rank);
    *pointer = v_pop();
}

/* Allocate a vector descriptor of "rank" dimensions plus its backing
 * data block of "length" elements of element type "type".  The length
 * is scaled to bytes by the element width; over-length requests for
 * wide elements are rejected to avoid eint32_t overflow. */
void
enew_vector(evector_t **pointer, etype_t type, eint32_t length, eint32_t rank)
{
    eint32_t	 size;
    evector_t	*vector;

    /* allocate multiple of 4 space for dimensions information
     * note that the base object cannot have the address changed,
     * so, if need reallocation or increase rank above the next
     * multiple of 4, a different approach should be used */
    size = sizeof(evector_t) + (rank + (4 - (rank & 3))) * sizeof(edim_t);
    /* this must be thread_self->obj in bytecode execution */
    enew_object((eobject_t *)pointer, (type & ~t_resize) | VE_MASK, size);
    vector = *pointer;
    vector->length = length;
    /* scale element count to bytes; 8-bit elements need no scaling,
     * and the "default" label lands on the pointer-sized case */
    switch (type & ~(CT_MASK|VE_MASK)) {
	case t_int8:  case t_uint8:
	    break;
	case t_int16: case t_uint16:
	    if (length > (most_positive_fix32 >> 1))
		eerror("out of bounds");
	    length <<= 1;
	    break;
	case t_int32: case t_uint32: case t_float32:
#if __WORDSIZE == 32
	default:
#endif
	    if (length > (most_positive_fix32 >> 2))
		eerror("out of bounds");
	    length <<= 2;
	    break;
	case t_int64: case t_uint64: case t_float64:
#if __WORDSIZE == 64
	default:
#endif
	    if (length > (most_positive_fix32 >> 3))
		eerror("out of bounds");
	    length <<= 3;
	    break;
    }
    /* the data block is resizable; the descriptor above is not */
    enew_object(&vector->v.obj, t_resize | (type & ~VE_MASK), length);
    if ((vector->rank = rank) == 1) {
	vector->dims[0].dim = vector->length;
	vector->dims[0].mul = 1;
    }
}

/* Resize an existing vector to "length" elements, scaling by the
 * element width of its type (same switch as enew_vector). */
void
erenew_vector(evector_t *vector, eint32_t length)
{
    eint32_t	bytes;
    switch (eobject_type(vector) & ~(CT_MASK|VE_MASK)) {
	case t_int8:  case t_uint8:
	    bytes = length;
	    break;
	case t_int16: case t_uint16:
	    if (length > (most_positive_fix32 >> 1))
		eerror("out of bounds");
	    bytes = length << 1;
	    break;
	case t_int32: case t_uint32: case t_float32:
#if __WORDSIZE == 32
	default:
#endif
	    if (length > (most_positive_fix32 >> 2))
		eerror("out of bounds");
	    bytes = length << 2;
	    break;
	case t_int64: case t_uint64: case t_float64:
#if __WORDSIZE == 64
	default:
#endif
	    if (length > (most_positive_fix32 >> 3))
		eerror("out of bounds");
	    bytes = length << 3;
	    break;
    }
    eresize_object(&vector->v.obj, bytes);
    vector->length = length;
    /* keep the single-dimension bookkeeping in sync */
    if (vector->rank == 1)
	vector->dims[0].dim = vector->length;
}

/* Allocate a reference object pointing at "value" with byte "offset";
 * RE_MASK in the header flags it as a pointer for the collector. */
void
enew_pointer(eobject_t *pointer, etype_t type, eobject_t value, eint32_t offset)
{
    epointer_t	*ref;

    enew_object(pointer, type | RE_MASK, sizeof(epointer_t));
    ref = *(epointer_t **)pointer;
    ref->offset = offset;
    ref->ptr = value;
}

/* Allocate a t_localref: a reference to the slot at "offset" in the
 * current thread's stack. */
void
enew_localref(eobject_t *pointer, eint32_t offset)
{
    elocalref_t	*ref;

    enew_object(pointer, t_localref, sizeof(elocalref_t));
    ref = *(elocalref_t **)pointer;
    ref->offset = offset;
    ref->thread = thread_self;
}

/* Allocate a t_t_localref: like enew_localref() but with the
 * t_t_localref type tag. */
void
enew_t_localref(eobject_t *pointer, eint32_t offset)
{
    elocalref_t	*ref;

    enew_object(pointer, t_t_localref, sizeof(elocalref_t));
    ref = *(elocalref_t **)pointer;
    ref->offset = offset;
    ref->thread = thread_self;
}

/* Allocate a t_globalref cell storing "object".
 * NOTE(review): "object" is not v_push-protected across the
 * allocation here; presumably callers guarantee it is otherwise
 * reachable — confirm against call sites. */
void
enew_globalref(eobject_t *pointer, eobject_t object)
{
    enew_object(pointer, t_globalref, sizeof(eobject_t));
    /* this pointer cannot be passed to gc, as it points to the middle of eds */
    **(eobject_t **)pointer = object;
}

/* Allocate a t_anonref cell storing "object"; the object is pushed on
 * the gc protection stack so the allocation cannot reclaim it. */
void
enew_anonref(eobject_t *pointer, eobject_t object)
{
    v_check(1);
    v_push(object);
    enew_object(pointer, t_anonref, sizeof(eobject_t));
    **(eobject_t **)pointer = object;
    v_dec();
}

/* Store an integer: small values become immediate fixnums, anything
 * outside the fixnum range is boxed on the heap.  Only valid outside
 * bytecode execution (asserted). */
void
enew_integer_novm(eobject_t *pointer, eint_t value)
{
    assert(vm_running == false);
    if (value >= most_negative_fixnum && value <= most_positive_fixnum)
	*pointer = efixnum(value);
    else
	enew_integer(pointer, value);
}

/* Box an integer on the heap (for values outside the fixnum range). */
void
enew_integer(eobject_t *pointer, eint_t value)
{
    eint_t	*boxed;

    enew_object(pointer, t_int, sizeof(eint_t));
    boxed = *(eint_t **)pointer;
    *boxed = value;
}

/* Box a floating point value on the heap. */
void
enew_float(eobject_t *pointer, efloat_t value)
{
    efloat_t	*boxed;

    enew_object(pointer, t_float, sizeof(efloat_t));
    boxed = *(efloat_t **)pointer;
    *boxed = value;
}

/* Allocate a gc-managed copy of the GMP integer Z. */
void
enew_mpz(eobject_t *pointer, empz_t Z)
{
    empz_t	copy;

    enew_object(pointer, t_mpz, sizeof(mpz_t));
    copy = *(empz_t *)pointer;
    mpz_init_set(copy, Z);
}

/* Allocate a gc-managed GMP integer initialized from the signed
 * machine integer i. */
void
enew_mpz_i(eobject_t *pointer, eint_t i)
{
    empz_t	z;

    enew_object(pointer, t_mpz, sizeof(mpz_t));
    z = *(empz_t *)pointer;
#if __WORDSIZE == 32
    /* on 32-bit hosts eint_t is wider than long, so values that do
     * not fit a signed long are assembled from two 32-bit halves */
    if (!emp_si_p(i)) {
	mpz_init_set_si(z, i >> 32);
	mpz_mul_2exp(z, z, 32);
	mpz_add_ui(z, z, i & 0xffffffff);
    }
    else
#endif
	mpz_init_set_si(z, i);
}

/* Allocate a gc-managed GMP integer initialized from the unsigned
 * machine integer u. */
void
enew_mpz_u(eobject_t *pointer, euint_t u)
{
    empz_t	z;

    enew_object(pointer, t_mpz, sizeof(mpz_t));
    z = *(empz_t *)pointer;
#if __WORDSIZE == 32
    /* on 32-bit hosts euint_t is wider than unsigned long, so values
     * that do not fit are assembled from two 32-bit halves */
    if (!emp_ui_p(u)) {
	mpz_init_set_ui(z, u >> 32);
	mpz_mul_2exp(z, z, 32);
	mpz_add_ui(z, z, u & 0xffffffff);
    }
    else
#endif
	mpz_init_set_ui(z, u);
}

/* Allocate a gc-managed copy of the GMP rational Q. */
void
enew_mpq(eobject_t *pointer, empq_t Q)
{
    empq_t	copy;

    enew_object(pointer, t_mpq, sizeof(mpq_t));
    copy = *(empq_t *)pointer;
    mpq_init(copy);
    mpq_set(copy, Q);
}

/* Allocate a gc-managed copy of the MPFR real R, using the current
 * thread's precision and rounding mode. */
void
enew_mpr(eobject_t *pointer, empr_t R)
{
    empr_t	copy;

    enew_object(pointer, t_mpr, sizeof(mpfr_t));
    copy = *(empr_t *)pointer;
    mpfr_init2(copy, thr_prc);
    mpfr_set(copy, R, thr_rnd);
}

/* Box a C99 double complex value on the heap. */
void
enew_cdd(eobject_t *pointer, complex double dd)
{
    ecdd_t	*boxed;

    enew_object(pointer, t_cdd, sizeof(ecdd_t));
    boxed = *(ecdd_t **)pointer;
    *boxed = dd;
}

/* Allocate a gc-managed copy of the rational complex qq. */
void
enew_cqq(eobject_t *pointer, ecqq_t qq)
{
    ecqq_t	copy;

    enew_object(pointer, t_cqq, sizeof(cqq_t));
    copy = *(ecqq_t *)pointer;
    cqq_init(copy);
    cqq_set(copy, qq);
}

/* Allocate a gc-managed copy of the MPC complex cc, using the current
 * thread's precision and complex rounding mode. */
void
enew_mpc(eobject_t *pointer, empc_t cc)
{
    empc_t	copy;

    enew_object(pointer, t_mpc, sizeof(mpc_t));
    copy = *(empc_t *)pointer;
    mpc_init2(copy, thr_prc);
    mpc_set(copy, cc, thr_rndc);
}

/* Allocate a lambda descriptor wrapping "code" with its argument
 * count and flag bits. */
void
enew_lambda(eobject_t *pointer, eobject_t code, eint32_t args, eint32_t bits)
{
    elambda_t	*object;

    enew_object(pointer, t_lambda, sizeof(elambda_t));
    object = *(elambda_t **)pointer;
    object->bits = bits;
    object->args = args;
    object->code.lambda = code;
}

/* Shallow-copy the spine of "list" (new cons cells, shared elements).
 * A non-cons argument is returned unchanged; an improper tail is
 * shared.  Both the source list and the partial copy are kept on the
 * gc protection stack while allocating. */
eobject_t
ecopy_list(eobject_t list)
{
    eobject_t	cons;
    eobject_t	tail;
    v_enter();

    v_check(2);
    v_push(list);
    if (econs_p(list)) {
	cons = tail = econs_nil(ecar(list));
	v_push(cons);
	for (list = ecdr(list); econs_p(list); list = ecdr(list)) {
	    erplacd(tail, econs_nil(ecar(list)));
	    tail = ecdr(tail);
	}
	/* share (or terminate with) whatever ends the source list */
	erplacd(tail, list);
    }
    else
	cons = list;
    v_leave();

    return (cons);
}

/* Public entry for copy_tree(): deep-copy a tree of conses and boxed
 * numbers, keeping the source gc-protected during the recursion. */
eobject_t
ecopy_tree(eobject_t list)
{
    eobject_t	cons;

    v_check(1);
    v_push(list);
    cons = copy_tree(list);
    v_dec();

    return (cons);
}

/* Recursive worker for ecopy_tree(): conses are copied car-deep and
 * cdr-iteratively, boxed numeric types get fresh copies, and any
 * other object is returned as-is.  The result under construction
 * lives in v_top() so gc triggered by allocations keeps it alive. */
static eobject_t
copy_tree(eobject_t list)
{
    eobject_t	cons;

    v_check(1);
    v_push(list);
    switch (etype(list)) {
	case t_cons:
	    cons = v_top() = econs_nil(copy_tree(ecar(list)));
	    /* dotted lists are not generated */
	    for (list = ecdr(list); list; list = ecdr(list), cons = ecdr(cons))
		erplacd(cons, econs_nil(copy_tree(ecar(list))));
	    break;
	case t_mpz:
	    enew_mpz(&v_top(), (empz_t)list);
	    break;
	case t_mpq:
	    enew_mpq(&v_top(), (empq_t)list);
	    break;
	case t_mpr:
	    enew_mpr(&v_top(), (empr_t)list);
	    break;
	case t_cqq:
	    enew_cqq(&v_top(), (ecqq_t)list);
	    break;
	case t_mpc:
	    enew_mpc(&v_top(), (empc_t)list);
	    break;
	default:
	    /* atoms (symbols, fixnums, ...) are shared, not copied */
	    break;
    }

    return (v_pop());
}

/* Return the final cons cell of "list" (or null for an empty list). */
eobject_t
elast(eobject_t list)
{
    eobject_t	next;

    if (list == null)
	return (null);
    assert(econs_p(list));
    while ((next = ecdr(list)))
	list = next;

    return (list);
}

/* Return the number of elements in "list", or -1 if the list is
 * circular.  Two-pointer (tortoise/hare) scan: "list" advances one
 * cons per iteration while "next" advances two and does the counting;
 * the pointers meeting proves a cycle. */
eint32_t
elength(eobject_t list)
{
    eobject_t	next;
    eint32_t	count;

    for (next = list, count = 0; econs_p(list); list = ecdr(list), count += 2) {
	if (!econs_p(next))
	    break;
	next = ecdr(next);
	if (!econs_p(next)) {
	    /* odd number of elements: the fast pointer took one step */
	    ++count;
	    break;
	}
	next = ecdr(next);
	/* length may be used to detect circular lists */
	if (next == list) {
	    count = -1;
	    break;
	}
    }

    return (count);
}

/* Destructively reverse "list" in place: each cdr is repointed at the
 * already-reversed prefix.  Returns the new head. */
eobject_t
ereverse(eobject_t list)
{
    eobject_t	head;
    eobject_t	rest;

    for (head = null; econs_p(list); list = rest) {
	rest = ecdr(list);
	erplacd(list, head);
	head = list;
    }

    return (head);
}

#if DEBUG
void
egc_check_root(void)
{
    ememory_t	*ptr;

    /* this should cause a crash if the ememory_t linked list is
     * broken due to entirely or partially overwritten */
    if (root) {
	for (ptr = root; ptr->next; ptr = ptr->next)
	    ;
	for (; ptr->prev; ptr = ptr->prev)
	    ;
	assert(root == ptr);
    }
}

/* Debug entry point: force a collection, validating the allocation
 * list before and after, under the gc lock. */
void
egc(void)
{
    gc_lock();
    egc_check_root();
    gc();
    egc_check_root();
    gc_unlock();
}
#endif

#if GC_DEBUG
/* GC_DEBUG replacement for the mark() macro: verifies "memory" is
 * actually on the allocation list (walking it with the mark bit
 * stripped from each next pointer) before tagging its low bit. */
static void
mark(ememory_t *memory)
{
    ememory_t	*ptr;

    for (ptr = root; ptr; ptr = (ememory_t *)((long)ptr->next & ~1)) {
	if (ptr == memory)
	    break;
    }
    assert(ptr == memory && ptr);
    memory->next = (ememory_t *)((long)memory->next | 1);
}
#endif

/* The collector: mark everything reachable from the roots, then sweep
 * the allocation list, running per-type finalizers and freeing any
 * unmarked block.  Caller must hold gc_mutex. */
static void
gc(void)
{
    ememory_t	*next;
    ethread_t	*thread;
    ememory_t	*memory;

    /* prevent threads from exiting */
    ethreads_lock();

    gc_mark_roots();

    /* sweep: marked blocks survive (and get unmarked for the next
     * cycle); unmarked blocks are finalized, unlinked and freed */
    next = root;
    while ((memory = next)) {
	if (mark_p(memory)) {
	    clear(memory);
	    next = memory->next;
	}
	else {
	    next = memory->next;
	    /* FIXME needs a "destructor" */
	    switch (memory->header) {
		case t_stream:
		    eclose(memory_to_object(estream_t *, memory));
		    break;
		case t_mpz:
		    mpz_clear(memory_to_object(empz_t, memory));
		    break;
		case t_mpq:
		    mpq_clear(memory_to_object(empq_t, memory));
		    break;
		case t_mpr:
		    mpfr_clear(memory_to_object(empr_t, memory));
		    break;
		case t_cqq:
		    cqq_clear(memory_to_object(ecqq_t, memory));
		    break;
		case t_mpc:
		    mpc_clear(memory_to_object(empc_t, memory));
		    break;
		case t_thread:
		    /* release the thread's scratch numeric temporaries */
		    thread = memory_to_object(ethread_t *, memory);
		    cqq_clear(&thread->qq);
		    mpc_clear(&thread->c);
		    mpq_clear(&thread->q0);
		    mpf_clear(&thread->f0);
		    mpc_clear(&thread->c0);
		    mpq_clear(&thread->q1);
		    mpq_clear(&thread->q2);
		    mpq_clear(&thread->q3);
		    break;
		case t_mutex:
		    emutex_destroy(memory_to_object(pthread_mutex_t *, memory));
		    break;
#if XWINDOW
		case t_bitmap:
		case t_pixmap:
		    edestroy_pixmap(memory_to_object(epixmap_t *, memory));
		    break;
		case t_window:
		    edestroy_window(memory_to_object(ewindow_t *, memory));
		    break;
		case t_context:
		    edestroy_context(memory_to_object(econtext_t *, memory));
		    break;
#endif
	    }
	    /* unlink from the doubly linked allocation list */
	    if (unlikely(root == memory)) {
		if ((root = next))
		    root->prev = null;
	    }
	    else if ((memory->prev->next = next))
		next->prev = memory->prev;
#if DEBUG
	    /* FIXME This can be removed later, or maybe zero'ed for
	     * paranoia checks. The pattern 0x5c '\\' if cast to a pointer
	     * isn't a fixnum, so this should cause a crash if free'd
	     * memory is ever dereferenced. */
	    memset(memory, 0x5c, sizeof(ememory_t) + memory->length);
#endif
	    free(memory);
	}
    }

    /* allow threads to exit */
    ethreads_unlock();
}

/* Mark a hash table: its bucket array, then every chained entry
 * (entries are full objects marked recursively via gc_mark). */
static void
gc_mark_hash(ehash_t *hash)
{
    eentry_t	*entry;
    eint32_t	 offset;

    if (hash->entries) {
	mark(object_to_memory(hash->entries));
	if (hash->count) {
	    for (offset = 0; offset < hash->size; offset++) {
		entry = hash->entries[offset];
		for (; entry; entry = entry->next)
		    gc_mark(object_to_memory(entry));
	    }
	}
    }
}

/* Mark the name and value of a hash table entry (the entry object
 * itself is marked by the caller). */
static void
gc_mark_entry(eentry_t *entry)
{
    gc_mark(object_to_memory(entry->name));
    gc_mark(object_to_memory(entry->value));
}

/* Mark a cons chain iteratively along the cdr direction (recursing
 * only into cars) so long and circular lists neither overflow the C
 * stack nor loop forever. */
static void
gc_mark_cons(eobject_t object)
{
    ememory_t	*memory;

    /* GC bit of first cons already set */
    gc_mark(object_to_memory(ecar(object)));
    /* Now also check for circularity */
    for (object = ecdr(object); ; object = ecdr(object)) {
	if (object == null || efixnum_p(object))
	    return;
	memory = object_to_memory(object);
	/* already visited: either shared structure or a cycle */
	if (mark_p(memory))
	    return;
	/* improper list: the tail is a non-cons object */
	if ((memory->header & (VE_MASK|RE_MASK|TY_MASK)) != t_cons) {
	    gc_mark(memory);
	    return;
	}
	mark(memory);
	gc_mark(object_to_memory(ecar(object)));
    }
}

/* Mark the internals of a function object.  Builtins are skipped:
 * their components are not gc-managed.  Leaf buffers with no interior
 * object pointers (esc/ptr/set) only need the plain mark(). */
static void
gc_mark_function(efunction_t *function)
{
    if (!function->name || !function->name->a_builtin) {
	gc_mark(object_to_memory(function->catches));
	gc_mark(object_to_memory(function->table));
	gc_mark(object_to_memory(function->gotos));
	gc_mark(object_to_memory(function->labels));
	gc_mark(object_to_memory(function->loads));
	gc_mark(object_to_memory(function->stores));
	gc_mark(object_to_memory(function->hsym));
	gc_mark(object_to_memory(function->vsym));
	if (function->esc)
	    mark(object_to_memory(function->esc));
	if (function->ptr)
	    mark(object_to_memory(function->ptr));
	if (function->set)
	    mark(object_to_memory(function->set));
	gc_mark(object_to_memory(function->functions));
	gc_mark(object_to_memory(function->code));
	gc_mark(object_to_memory(function->hash));
    }
}

/* Mark an ast list: each list cell and its node, walking iteratively
 * and stopping at an already marked cell (shared tail or cycle).
 * Nodes get a plain mark() — presumably they contain no further
 * gc-managed pointers; confirm against the east_node definition. */
static void
gc_mark_ast(east_list_t *ast)
{
    ememory_t	*memory;

    mark(object_to_memory(east_node(ast)));
    for (ast = east_next(ast); ast; ast = east_next(ast)) {
	memory = object_to_memory(ast);
	if (unlikely(mark_p(memory)))
	    break;
	mark(memory);
	mark(object_to_memory(east_node(ast)));
    }
}

/* Mark the internals of a label object: source/destination lists,
 * dataflow bitsets (leaf buffers, plain mark) and the type inference
 * state, which may be null. */
static void
gc_mark_label(elabel_t *label)
{
    gc_mark(object_to_memory(label->srcs));
    gc_mark(object_to_memory(label->dsts));
    if (label->set)
	mark(object_to_memory(label->set));
    if (label->use)
	mark(object_to_memory(label->use));
    if (label->phi)
	mark(object_to_memory(label->phi));
    gc_mark(object_to_memory(label->dfa));
    if (label->infer_stack)
	gc_mark(object_to_memory(label->infer_stack));
    if (label->infer_types)
	gc_mark(object_to_memory(label->infer_types));
    gc_mark(object_to_memory(label->jumps));
}


#if JITVM
/* Mark the internals of a jit compilation state: register sets and
 * scratch buffers are leaf blocks (plain mark), while the structured
 * members (hashes, block/patch/function lists, constant pool) are
 * marked recursively. */
static void
gc_mark_jit_state(jit_state_t *state)
{
#  if JIT_REGSET_MPZ
    if (state->regarg)
	mark(object_to_memory(state->regarg));
    if (state->regsav)
	mark(object_to_memory(state->regsav));
    if (state->reglive)
	mark(object_to_memory(state->reglive));
    if (state->regmask)
	mark(object_to_memory(state->regmask));
#  endif
    if (state->blockmask)
	mark(object_to_memory(state->blockmask));
    if (state->spill)
	mark(object_to_memory(state->spill));
    if (state->gen)
	mark(object_to_memory(state->gen));
    if (state->values)
	mark(object_to_memory(state->values));
    gc_mark(object_to_memory(state->data_hash));
    gc_mark(object_to_memory(state->blocks));
    gc_mark(object_to_memory(state->patches));
    gc_mark(object_to_memory(state->functions));
    gc_mark(object_to_memory(state->pool));
#  if __arm__
    gc_mark(object_to_memory(state->data_info));
#  endif
}
#endif

/* Core recursive marker.  Dispatches on the type bits stored in the
 * memory header and marks every object reachable from `memory`.
 * The last (or only) child of an object is followed iteratively via
 * `goto again` instead of a recursive call, bounding stack depth for
 * long reference chains (e.g. cons lists, parent chains). */
static void
gc_mark(ememory_t *memory)
{
    eint8_t		*base;
    ertti_t		*rtti;
    etype_t		 type;
    eobject_t		 object;
    eint32_t		 offset;
    eobject_t		*pointer;
    ehashentry_t	*hashentry;
    /* view the object payload as whichever concrete type the header says */
    union {
	east_list_t	*ast;
	eblock_t	*block;
	eentry_t	*entry;
	efunction_t	*function;
	ehash_t		*hash;
	ehashentry_t	*hashentry;
	ehashtable_t	*hashtable;
	ehswitch_t	*hswitch;
	elabel_t	*label;
#if JITVM
	jit_block_t	*jblock;
	jit_state_t	*jstate;
	jit_function_t	*jfunction;
#endif
	essa_load_t	*load;
	emacro_t	*macro;
	eobject_t	 object;
	eobject_t	*pointer;
	erecord_t	*record;
	edfa_rule_t	*rule;
	essa_store_t	*store;
	estream_t	*stream;
	esymbol_t	*symbol;
	etag_t		*tag;
	ethread_t	*thread;
	evector_t	*vector;
    } o;

again:
    /* nothing to do for the null sentinel, immediate fixnums, or
     * objects already marked this cycle (also breaks reference cycles) */
    if (memory == null_memory || efixnum_p(memory) || mark_p(memory))
	return;
    mark(memory);
    o.object = memory_to_object(eobject_t, memory);
    /* type tag plus the vector/reference qualifier bits */
    type = memory->header & (VE_MASK|RE_MASK|TY_MASK);
    switch (type) {
	case t_hashtable:
	    if (o.hashtable->entries) {
		/* the bucket array itself is a leaf allocation ... */
		mark(object_to_memory(o.hashtable->entries));
		if (o.hashtable->count) {
		    /* ... but each chained entry must be traversed */
		    for (offset = 0; offset < o.hashtable->size; offset++) {
			hashentry = o.hashtable->entries[offset];
			for (; hashentry; hashentry = hashentry->next)
			    gc_mark(object_to_memory(hashentry));
		    }
		}
	    }
	    memory = object_to_memory(o.hashtable->vector);
	    goto again;
	case t_hashentry:
	    /* packed key types carry no object reference */
	    switch (o.hashentry->nt) {
		case t_int:		case t_float:		case t_cdd:
		    break;
		default:
		    gc_mark(object_to_memory(o.hashentry->nv.o));
		    break;
	    }
	    /* likewise for packed value types */
	    switch (o.hashentry->vt) {
		case t_int:		case t_float:		case t_cdd:
		    return;
		default:
		    break;
	    }
	    memory = object_to_memory(o.hashentry->vv.o);
	    goto again;
	case t_hash:
	    gc_mark_hash(o.hash);
	    break;
	case t_entry:
	    gc_mark_entry(o.entry);
	    break;
	case t_cons:
	    gc_mark_cons(o.object);
	    break;
	case t_function:
	    gc_mark_function(o.function);
	    break;
	case t_ast_list:
	    gc_mark_ast(o.ast);
	    break;
	case t_label:
	    gc_mark_label(o.label);
	    break;
	case t_ssa_load:
	    if (o.load->data) {
		memory = object_to_memory(o.load->data);
		mark(memory);
		/* element count recovered from the allocation length */
		offset = memory->length / sizeof(essa_load_data_t) - 1;
		for (; offset >= 0; offset--) {
		    if (o.load->data[offset].node)
			mark(object_to_memory(o.load->data[offset].node));
		    if (o.load->data[offset].lists)
			gc_mark(object_to_memory(o.load->data[offset].lists));
		    if (o.load->data[offset].labels)
			gc_mark(object_to_memory(o.load->data[offset].labels));
		}
	    }
	    break;
	case t_ssa_store:
	    if (o.store->data)
		mark(object_to_memory(o.store->data));
	    break;
#if JITVM
	case t_jit_block:
#  if JIT_REGSET_MPZ
	    /* mpz-backed regsets are leaf bignum buffers */
	    if (o.jblock->reglive)
		mark(object_to_memory(o.jblock->reglive));
	    if (o.jblock->regmask)
		mark(object_to_memory(o.jblock->regmask));
#  endif
	    break;
	case t_jit_state:
	    gc_mark_jit_state(o.jstate);
	    break;
	case t_jit_function:
	    if (o.jfunction->regoff)
		mark(object_to_memory(o.jfunction->regoff));
#  if JIT_REGSET_MPZ
	    if (o.jfunction->regset)
		mark(object_to_memory(o.jfunction->regset));
#  endif
	    break;
#endif
	case t_symbol:
	    memory = object_to_memory(o.symbol->value);
	    goto again;
	case t_dfa_rule:
	    /* NOTE(review): accesses the union as o.symbol although the
	     * header says t_dfa_rule; presumably esymbol_t.name and
	     * edfa_rule_t.name share the same offset — confirm, or use
	     * o.rule->name for clarity. */
	    memory = object_to_memory(o.symbol->name);
	    goto again;
	case t_macro:
	    if (o.macro->hash)
		gc_mark(object_to_memory(o.macro->hash));
	    memory = object_to_memory(o.macro->value);
	    goto again;
#if HAVE_THREAD
	case t_thread:
	    /* a running thread is marked (after being suspended) by
	     * gc_mark_roots; only dead threads are handled here */
	    if (!o.thread->running)
		gc_mark_thread(o.thread);
	    break;
#endif
	case t_stream:
	    gc_mark(object_to_memory(o.stream->name));
	    memory = object_to_memory(o.stream->ptr);
	    goto again;
	case t_block:
	    gc_mark(object_to_memory(o.block->table));
	    gc_mark(object_to_memory(o.block->t_jump));
	    gc_mark(object_to_memory(o.block->f_jump));
	    /* follow the lexical parent chain iteratively */
	    memory = object_to_memory(o.block->parent);
	    goto again;
	case t_tag:
	    memory = object_to_memory(o.tag->data);
	    goto again;
	case t_switch:
	    /* `cases` is a trailing object-pointer array; its length is
	     * derived from the total allocation size */
	    pointer = (eobject_t *)&o.hswitch->cases;
	    offset = (eobject_length(o.hswitch) - sizeof(ehswitch_t)) /
		sizeof(eobject_t);
	    for (; offset >= 0; offset--) {
		if ((object = pointer[offset]))
		    mark(object_to_memory(object));
	    }
	    break;
	case t_anonref:		/* protect eobject_t */
	case t_localref:	/* protect ethread_t */
	    memory = object_to_memory(o.vector->v.obj);
	    goto again;
	/* record hierarchy: class adds methods on top of struct, struct
	 * adds offsets on top of union; the fallthroughs are deliberate */
	case t_class:
	    gc_mark(object_to_memory(o.record->methods));
	    if (o.record->vmethod)
		gc_mark(object_to_memory(o.record->vmethod));
	    /* fallthrough */
	case t_struct:
	    if (o.record->offsets)
		gc_mark(object_to_memory(o.record->offsets));
	    /* fallthrough */
	case t_union:
	    gc_mark(object_to_memory(o.record->vfield));
	    memory = object_to_memory(o.record->fields);
	    goto again;
	/* numeric vectors: only the backing storage needs marking */
	case t_vector_int8:			case t_vector_uint8:
	case t_vector_int16:			case t_vector_uint16:
	case t_vector_int32:			case t_vector_uint32:
	case t_vector_int64:			case t_vector_uint64:
	case t_vector_float32:			case t_vector_float64:
	    if ((o.object = o.vector->v.obj))
		mark(object_to_memory(o.object));
	    break;
#if DEBUG
	    /* objects that should exist only in vm register or vm stack */
	case t_rgz:			case t_shz:
	case t_rgq:			case t_shq:
	case t_rgr:			case t_shr:
	case t_rqq:			case t_sqq:
	case t_rgc:			case t_shc:
	case t_stackref:
	case t_t_stackref:
	case t_vectorref:
	case t_recordref:
	case t_hashref:
	    abort();
#endif
	default:
	    /* reference wrapper: just follow the referenced object */
	    if (type & RE_MASK) {
		memory = object_to_memory(o.vector->v.obj);
		goto again;
	    }
	    /* generic object vector: mark storage, then every element */
	    if (type & VE_MASK) {
		if ((o.object = o.vector->v.obj)) {
		    memory = object_to_memory(o.object);
		    mark(memory);
		    pointer = o.pointer + memory->length / sizeof(eobject_t);
		    for (; o.pointer < pointer; o.pointer++)
			gc_mark(object_to_memory(*o.pointer));
		}
	    }
	    /* instances can only exist after type declaration */
	    else if (type < ets_idx) {
		/* rtti-driven marking: walk the recorded pointer-field
		 * offsets; the field at offsets[0] is handled last so it
		 * can be followed via the iterative tail */
		rtti = ets + type;
		if (rtti->noffset) {
		    base = memory_to_object(eint8_t *, memory);
		    for (offset = 1; offset < rtti->noffset; offset++) {
			pointer = (eobject_t *)(base + rtti->offsets[offset]);
			if ((object = *pointer))
			    gc_mark(object_to_memory(object));
		    }
		    pointer = (eobject_t *)(base + rtti->offsets[0]);
		    if ((object = *pointer)) {
			memory = object_to_memory(object);
			goto again;
		    }
		}
	    }
	    break;
    }
}

/* Mark a per-thread multiple-precision number cache: the entries vector
 * storage, every cached entry object, and (when present) the indexes
 * leaf vector.  A null entries pointer means the cache was never
 * allocated and there is nothing to mark. */
static void
gc_mark_mpcache(eobject_t *entries, eobject_t indexes, eint32_t length)
{
    eint32_t	offset;

    if (entries == NULL)
	return;
    mark(object_to_memory(entries));
    for (offset = length - 1; offset >= 0; offset--)
	gc_mark(object_to_memory(entries[offset]));
    if (indexes)
	mark(object_to_memory(indexes));
}

/* Mark everything reachable from a thread: its object list, vm stack,
 * pending-exception value, mp caches, global-protection stack and scratch
 * vector.  If the thread is running and is not the caller, it is first
 * suspended via SUSPEND_SIGNAL (so its stack is stable while scanned) and
 * resumed when done; see the USE_SEMAPHORE discussion at the top of the
 * file for the two handshake variants. */
static void
gc_mark_thread(ethread_t *thread)
{
    evalue_t	*v;
    eobject_t	*o;
#if HAVE_THREAD
    ebool_t	 stop;
    eint32_t	 error;

    /* only suspend a live thread other than ourselves */
    stop = thread->running && !pthread_equal(pthread_self(), thread->pthread);

    /* suspend control thread */
    if (stop) {
#if USE_SEMAPHORE
	if ((error = pthread_kill(thread->pthread, SUSPEND_SIGNAL)))
	    eerror("pthread_kill: %s", strerror(error));
	/* wait for signal handler to enter */
	while (sem_wait(&enter_sem)) {
	    if (errno != EINTR)
		eerror("sem_wait: %s", strerror(errno));
	    sched_yield();
	}
#else
	/* handshake on the volatile sr_int flag set by the handler */
	sr_int = 0;
	if ((error = pthread_kill(thread->pthread, SUSPEND_SIGNAL)))
	    eerror("pthread_kill: %s", strerror(error));
	while (!sr_int)
	    sched_yield();
#endif
    }
#endif

    mark(object_to_memory(thread));
    gc_mark(object_to_memory(thread->obj));
    if (thread->ess) {
	/* if stack is reallocated, it will block */
	mark(object_to_memory(thread->ess));
	if (thread->esp) {
	    /* scan live stack slots, dispatching on the slot type tag */
	    for (v = thread->ess; v < thread->esp; v++) {
		switch (v->t) {
		    case t_void:
		    case t_int:			/* aka t_int64 */
		    case t_float:		/* aka t_float64 */
			/* statically  typed objects */
		    case t_int8:		case t_uint8:
		    case t_int16:		case t_uint16:
		    case t_int32:		case t_uint32:
		    case t_uint64:		case t_float32:
			/* special pointer like object */
		    case t_globalref:
			/* shared mp objects */
		    case t_shz:			case t_shq:
		    case t_shr:
			/* shared complex objects */
		    case t_cdd:			case t_sqq:
		    case t_shc:
			break;
		    case t_hashref:
			/* ensure entry is protected in case
			 * operation side effects are "evil" */
			gc_mark(object_to_memory(v->v.href.entry));
			break;
#if DEBUG
			/* objects that should exist only in vm register */
		    case t_rgz:			case t_rgq:
		    case t_rgr:			case t_rqq:
		    case t_rgc:
			abort();
#endif
		    default:
			/* objects that may exist only in vm stack */
			gc_mark(object_to_memory(v->v.o));
			break;
		}
	    }
	    /* this call should ensure that only valid objects are below
	     * esp, so that it is not required to "clean" stack slots when
	     * calling/entering a function */
	    memset(thread->esp, 0, ((thread->ess + thread->len) - thread->esp) *
		   sizeof(evalue_t));
	}
#if 0
	/* unfortunately, this cannot be done (at least for now), as
	 * would need to ensure atomicity when setting type/value */
	 /* FIXME
	  * actually, it is possible, just follow the rules for
	  * stack values:
	  *	if settig to a packed type, set type first
	  *	else set type last
	  * it will work because the stack pointer is updated after
	  * operations are done, and objects are held in thread_self->obj
	  * after allocation
	  *	major difference is that the r_rg* types should be considered
	  * as packed types
	 */
	switch (thread->r0.t) {
	    case t_void:			case t_int:
	    case t_float:			case t_cdd:
	    case t_shz:				case t_rgz:
	    case t_shq:				case t_rgq:
	    case t_shr:				case t_rgr:
	    case t_sqq:				case t_rqq:
	    case t_shc:				case t_mpc:
	    case t_globalref:			case t_localref:
	    case t_t_localref:
		break;
	    default:
		gc_mark(thread->r0.v.o);
		break;
	}
#endif
	/* exception value: drop it if no try handler is active,
	 * otherwise keep it alive */
	switch (thread->evalue.t) {
	    case t_void:			case t_int:
	    case t_float:			case t_cdd:
		break;
	    default:
		/* if no exception pending */
		if (thread->tryoff < 0)
		    thread->evalue.t = t_void;
		else
		    gc_mark(thread->evalue.v.o);
		break;
	}
    }
    /* per-thread caches of mp integers/rationals/reals/complexes */
    gc_mark_mpcache(thread->c_z.entries, thread->c_z.indexes,
		    thread->c_z.length);
    gc_mark_mpcache(thread->c_q.entries, thread->c_q.indexes,
		    thread->c_q.length);
    gc_mark_mpcache(thread->c_r.entries, thread->c_r.indexes,
		    thread->c_r.length);
    gc_mark_mpcache(thread->c_qq.entries, thread->c_qq.indexes,
		    thread->c_qq.length);
    gc_mark_mpcache(thread->c_c.entries, thread->c_c.indexes,
		    thread->c_c.length);
    /* explicit gc-protection stack */
    if (thread->egs) {
	mark(object_to_memory(thread->egs));
	for (o = thread->egs + thread->gsi - 1; o >= thread->egs; o--)
	    gc_mark(object_to_memory(*o));
    }
    /* scratch vector and its backing storage */
    if (thread->vec) {
	mark(object_to_memory(thread->vec));
	if (thread->vec->v.obj)
	    mark(object_to_memory(thread->vec->v.obj));
    }

#if HAVE_THREAD
    /* resume control thread */
    if (stop) {
#if USE_SEMAPHORE
	if (sem_post(&sleep_sem))
	    eerror("sem_post: %s", strerror(errno));
	/* wait for signal handler to leave */
	while (sem_wait(&leave_sem)) {
	    if (errno != EINTR)
		eerror("sem_wait: %s", strerror(errno));
	    sched_yield();
	}
#else
	/* same sr_int flag handshake, now with RESUME_SIGNAL */
	sr_int = 0;
	if ((error = pthread_kill(thread->pthread, RESUME_SIGNAL)))
	    eerror("pthread_kill: %s", strerror(error));
	while (!sr_int)
	    sched_yield();
#endif
    }
#endif
}

/* Mark every root set of the collector: the explicit root list, all
 * threads, the global vectors (ebs/ecs/eds/els/ets), the rtti tables,
 * the record vector and the debug line-number tables. */
static void
gc_mark_roots(void)
{
    eint32_t	 i;
    eint32_t	 j;
    eline_t	*line;
    enote_t	*note;
    ertti_t	*rtti;
    eobject_t	 list;
    eobject_t	*array;
#if HAVE_THREAD
    ethread_t	*thread;
#endif
    ememory_t	*memory;

    /* root_list is a cons list of pointers to object slots; mark each
     * cons cell itself, then what the registered slot points at */
    for (list = root_list; list; list = ecdr(list)) {
	memory = object_to_memory(list);
	mark(memory);
	array = ecar(list);
	gc_mark(object_to_memory(*array));
    }

#if HAVE_THREAD
    /* threads form a circular list anchored at tmain */
    thread = tmain;
    do {
	gc_mark_thread(thread);
	thread = thread->next;
    } while (thread != tmain);
#else
    gc_mark_thread(tmain);
#endif

    mark(object_to_memory(ebs));

    /* ecs is a vector of symbols during compile, and a byte vector later */
    mark(object_to_memory(ecs));
#if !JITVM
    for (array = ecs + ecs_idx - 1; array >= ecs; array--)
	gc_mark(object_to_memory(*array));
#endif

    mark(object_to_memory(eds));
    for (array = eds + eds_idx - 1; array >= eds; array--)
	gc_mark(object_to_memory(*array));

    mark(object_to_memory(els));
    for (array = els + els_idx - 1; array >= els; array--)
	gc_mark(object_to_memory(*array));

    /* rtti table: per-type field/offset/method metadata are leaf
     * allocations; method code vectors only exist without the jit */
    mark(object_to_memory(ets));
    for (rtti = ets + ets_idx - 1; rtti >= ets; rtti--) {
	if (rtti->fields)
	    mark(object_to_memory(rtti->fields));
	if (rtti->offsets)
	    mark(object_to_memory(rtti->offsets));
	if (rtti->methods) {
	    mark(object_to_memory(rtti->methods));
#if !JITVM
	    for (i = 0; i < rtti->nmethod; i++)
		mark(object_to_memory(rtti->methods[i].code));
#endif
	}
	if (rtti->translate)
	    mark(object_to_memory(rtti->translate));
    }
    mark(object_to_memory(record_vector));

    /* this memory should be not in gc, and possibly even not loaded to
     * memory, also, not marking function and filename vectors, as they
     * are in the constant string table, that should also not be marked
     * in every gc, as they are program lifetime constants */
    if (note_vector) {
	mark(object_to_memory(note_vector));
	for (note = note_vector, i = 0; i < note_count; note++, i++) {
	    if (note->lines) {
		mark(object_to_memory(note->lines));
		for (line = note->lines, j = 0; j < note->count; line++, j++) {
		    if (line->linenos)
			mark(object_to_memory(line->linenos));
		    if (line->offsets)
			mark(object_to_memory(line->offsets));
		}
	    }
	}
    }
}
