/* Definitions for the memory interface.

   This file is part of khipu.

   khipu is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

#include <cstdio>
#include <cstdlib>
#include <csignal>
#include <climits>
#include "khipu.hpp"

#ifdef KP_PLATFORM_UNIX
  #include <sys/mman.h>
  #include <unistd.h>
#elif defined (KP_PLATFORM_WINDOWS)
  #include <windows.h>
#endif

KP_DECLS_BEGIN

/* Abort due to memory exhaustion.  If no interpreter is available
   (neither the argument nor the calling thread's), print a plain message
   and exit; otherwise raise a "memory-error" condition before exiting.
   NOTE(review): callers in this file invoke raise_oom () with no
   argument, so the declaration presumably defaults INTERP to null --
   confirm against the header. */
[[noreturn]] void raise_oom (interpreter *interp)
{
  if (!interp && !(interp = interpreter::self ()))
    {
      fputs ("memory exhausted\n", stderr);
      exit (1);
    }

  (void)interp->raise ("memory-error", "failed to allocate memory");
  exit (1);
}

/* malloc wrapper that never returns null: aborts through raise_oom on
   failure, or when the returned block is not 8-byte aligned (the object
   tagging scheme requires the low three address bits to be clear). */
void* xmalloc (size_t size)
{
  void *block = malloc (size);

  bool misaligned = ((uintptr_t)block & 7) != 0;
  if (block == nullptr || misaligned)
    raise_oom ();

  return (block);
}

/* realloc wrapper with the same guarantees as xmalloc: a failed
   (non-zero-sized) reallocation, or a misaligned result, aborts via
   raise_oom.  A null return for SIZE == 0 is passed through. */
void* xrealloc (void *ptr, size_t size)
{
  void *block = realloc (ptr, size);

  bool failed = block == nullptr
    ? size != 0
    : ((uintptr_t)block & 7) != 0;

  if (failed)
    raise_oom ();

  return (block);
}

/* Release a block obtained from xmalloc/xrealloc.
   free(nullptr) is a no-op, so no guard is needed. */
void xfree (void *ptr)
{
  free (ptr);
}

#ifdef KP_ARCH_WIDE

/* Verify that PTR fits entirely below the 2^MASK address boundary used
   by the pointer-tagging scheme; abort via raise_oom otherwise.  Only
   compiled on wide-pointer builds (KP_ARCH_WIDE).
   NOTE(review): callers also invoke this with a single argument, so the
   header presumably supplies a default MASK -- confirm. */
void* ensure_mask_impl (void *ptr, int mask)
{
  if ((uintptr_t)ptr & ~((UINT64_C (1) << mask) - 1))
    raise_oom ();

  return (ptr);
}

#endif

/* Allocate a variable-size object of SIZE bytes whose address fits
   under MASK bits, initializing its header: all flags cleared and the
   type field set to TYPE. */
void* alloch (size_t size, int type, int mask)
{
  varobj *ret = (varobj *)ensure_mask_impl (xmalloc (size), mask);
  ret->vo_full = 0;
  ret->vo_type = type;
  return (ret);
}

// GC implementation.

static const int BM_BITS = sizeof (uintptr_t) * 8;

/* A page-sized slab of cons cells.  Layout: the next-page link (padded
   to an int64_t so the conses stay aligned), then the conses, then the
   old-generation mark bitmap (see gcinfo::init for the space math). */
struct conspage
{
  conspage *next;

  // Return the IDX'th cons; cells start right after the link word.
  cons* at (unsigned int idx) const
    {
      cons *retp = (cons *)((char *)this + sizeof (int64_t));
      return (retp + idx);
    }

  // Address of this page's mark bitmap (stored after the conses).
  inline uintptr_t* bitmap () const;

  // Given a pointer to a cons, return the page it belongs to.
  static conspage* from_cons (const cons *ptr);
};

#ifdef KP_PLATFORM_UNIX

/* Allocate N contiguous pages of PS bytes each via anonymous mmap,
   aborting through raise_oom on failure.  The product is widened to
   size_t before multiplying: both operands are unsigned int, so a large
   request could otherwise wrap around and silently under-allocate. */
static inline void*
alloc_npages (unsigned int n, unsigned int ps)
{
  void *ret = mmap (nullptr, (size_t)ps * n, PROT_READ |
    PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (ret == MAP_FAILED)
    raise_oom ();

  return (ensure_mask_impl (ret));
}

#elif defined (KP_PLATFORM_WINDOWS)

/* Allocate N contiguous pages of PS bytes each via VirtualAlloc,
   aborting through raise_oom on failure.  The product is widened to
   size_t before multiplying: both operands are unsigned int, so a large
   request could otherwise wrap around and silently under-allocate. */
static inline void*
alloc_npages (unsigned int n, unsigned int ps)
{
  void *ret = VirtualAlloc (nullptr, (size_t)ps * n,
    MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
  if (!ret)
    raise_oom ();

  return (ensure_mask_impl (ret));
}

#endif

/* Destroy a variable-size object: run its finalizer first if the object
   is flagged finalizable and the finalizer hasn't already run, then
   release the storage. */
static void
fini_varobj (varobj *ptr)
{
  if (ptr->flagged_p (FLAGS_FINALIZABLE))
    {
      finobj *fp = (finobj *)ptr;
      if (fp->fini)
        {
          fp->fini (fp);
          fp->fini = nullptr;   // Ensure the finalizer runs at most once.
        }
    }

  xfree (ptr);
}

// A batch of contiguous, pre-reserved cons pages handed out one by one.
struct cons_cache
{
  conspage *cache;   // Next page to hand out.
  int ncached;       // Pages remaining in the current batch.
};

/* Fill N conses starting at PTR with values drawn from ITER, chaining
   each cell's cdr to the next and NIL-terminating the list.  Returns
   the head as an object.  N must be >= 1: a zero N would make the
   'n - 1' loop bound below wrap around (uint32_t). */
template <typename Iter>
static object
link_conses (cons *ptr, uint32_t n, Iter& iter)
{
  // Link adjacent conses.
  for (uint32_t i = 0; i < n - 1; ++i)
    {
      ptr[i].car = *iter++;
      ptr[i].cdr = ptr[i + 1].as_obj ();
    }

  ptr[n - 1].car = *iter++;
  ptr[n - 1].cdr = NIL;
  return (ptr->as_obj ());
}

/* A pair of circular doubly-linked lists of variable-size objects,
   threaded through varobj::gc_link.  Index 0 holds plain objects;
   index 1 holds objects flagged FLAGS_ATEXIT (finalizable at exit). */
struct varobj_set
{
  dlist sets[2];

  void reset ()
    {
      this->sets[0].init_head ();
      this->sets[1].init_head ();
    }

  bool empty () const
    {
      return (this->sets[0].next == &this->sets[0] &&
        this->sets[1].next == &this->sets[1]);
    }

  // Splice the whole list INP onto the tail of this set's IDX'th list.
  void add_set (dlist *inp, int idx)
    {
      if (inp->next == inp)
        return;   // Source list is empty.

      dlist *outp = &this->sets[idx];
      dlist *l1 = outp->prev, *f2 = inp->next, *l2 = inp->prev;

      // Append [f2 .. l2] after the current tail l1.
      l1->next = f2;
      f2->prev = l1;

      /* Close the circle back to the list head.  (The previous code set
       * l2->next = l1 and rewrote l1->prev, which left the head
       * unreachable from a forward traversal and corrupted the list.) */
      l2->next = outp;
      outp->prev = l2;
    }

  // Insert a single object, dispatching on its FLAGS_ATEXIT bit.
  void add_obj (varobj *vp)
    {
      uintptr_t idx = (vp->vo_full & FLAGS_ATEXIT) / FLAGS_ATEXIT;
      this->sets[idx].add (&vp->gc_link);
    }

  // Move every object from SP into this set, leaving SP empty.
  void concat (varobj_set *sp)
    {
      this->add_set (&sp->sets[0], 0);
      this->add_set (&sp->sets[1], 1);
      sp->reset ();
    }
};

static const int FINOBJ_IDX = 1;

// GC-specific information.  A single static instance (KP_gc) holds all
// collector state shared by every interpreter thread.
struct gcinfo
{
  int pagesize;               // System page size, queried at init time.
  int pagecache_size;         // Pages reserved per batch in the cons cache.
  atomic_t xlock;             // Lightweight lock guarding global GC state.
  size_t acc_bytes;           // Bytes allocated since the last collection.
  size_t limit;               // Byte threshold that triggers a collection.
  sync_event event;           // Rendezvous event for suspending threads.
  bool full_gc;               // Next collection is a full (major) one.
  bool enabled;               // Collector on/off switch (see gc_status).
  cons_cache ccache;          // Batch of pre-reserved cons pages.
  uint32_t conses_per_page;   // Conses that fit in one page (see init).
  varobj_set black;           // Objects marked live this cycle.
  varobj_set gray;            // Objects queued for deferred marking.
  varobj_set white;           // Sweep candidates.
  memmgr *free_mgrs;          // Managers of exited threads, not yet reused.
  size_t vo_size_limit;       // Per-manager varobj byte cap before a flush.
  size_t vo_nelem_limit;      // Per-manager varobj count cap before a flush.
  uintptr_t gcbit;            // Flag bit(s) identifying the live generation.
  uintptr_t n_suspended;      // Threads signalled during suspend_all.

  static const uintptr_t GC_MASK = FLAGS_GCBIT1 | FLAGS_GCBIT2 | FLAGS_GCDIRTY;
  static const uint32_t MAX_MARK_DEPTH = 64;

  // One-time setup: page geometry, tunables and the mark sets.
  bool init ()
    {
#ifdef KP_PLATFORM_WINDOWS
      SYSTEM_INFO info;
      GetSystemInfo (&info);
      this->pagesize = info.dwPageSize;
#elif defined (KP_PLATFORM_UNIX)
      this->pagesize = sysconf (_SC_PAGESIZE);
#endif

      /* Within a cons page, we need enough room for:
       * - A pointer to the next page.
       * - The conses themselves (We'll call this 'C').
       * - Enough bytes for a bitmap, to know which conses
       *   belong to the old generation.
       *
       * Given that, the parameter C can be determined as such:
       * PAGESIZE = 2P * C + C / 8 + P; where P is the wordsize.
       *
       * Solving the above equation gives us:
       * C = ((PAGESIZE - P) * 8) / (16P + 1)
       * Which we round down in order to play it safe, and to ensure
       * that the space for the bitmap is correctly aligned. */

      this->conses_per_page =
        ((this->pagesize - sizeof (conspage *)) * 8) /
          (16 * sizeof (conspage *) + 1) - 1;

#ifndef KP_ARCH_WIDE
      /* On 32-bit platforms, we furthermore need to ensure that conses
       * are properly aligned, so we use an additional word for that. */
      --this->conses_per_page;
#endif

      // XXX: Make these tunable.
      this->limit = 10 * 1024 * 1024;
      this->pagecache_size = 8 * sizeof (void *);
      this->vo_nelem_limit = 1024;
      this->vo_size_limit = this->limit / 16;

      /* NOTE(review): alloc_conspage reads ccache.ncached before it is
       * assigned here; this relies on KP_gc having static storage
       * duration (zero-initialized) -- confirm. */
      this->ccache.cache = this->alloc_conspage ();
      this->free_mgrs = nullptr;
      lwlock_init (&this->xlock);
      this->black.reset ();
      this->gray.reset ();
      this->white.reset ();
      this->gcbit = FLAGS_GCBIT1;
      this->event.init ();
      return (true);
    }

  // Acquire the global GC lock (no-op in single-threaded builds).
  void lock (interpreter *interp)
    {
#ifndef KP_NO_THREADS
      lwlock_grab_nointr (interp, &this->xlock);
#endif
    }

  // Release the global GC lock (no-op in single-threaded builds).
  void unlock ()
    {
#ifndef KP_NO_THREADS
      lwlock_drop (&this->xlock);
#endif
    }

  void* alloc_pages (unsigned int n)
    {
      return (alloc_npages (n, this->pagesize));
    }

  // Hand out one cons page, refilling the batch cache when exhausted.
  // Caller must hold the GC lock in multi-threaded builds.
  conspage* alloc_conspage ()
    {
      if (this->ccache.ncached == 0)
        this->ccache.cache = (conspage *)
          this->alloc_pages (this->ccache.ncached = this->pagecache_size);

      conspage *ret = this->ccache.cache;
      this->ccache.cache = (conspage *)
        ((char *)this->ccache.cache + this->pagesize);
      --this->ccache.ncached;

      return (ret);
    }

  void do_gc (interpreter *interp);
  void mark_pkg (package *pkgp, uint32_t depth);
  void mark_cons (object obj, uint32_t depth);
  void mark (object obj, uint32_t depth = 0);

  // Account for SIZE freshly allocated bytes; collect once over limit.
  void update_alloc (interpreter *interp, size_t size)
    {
      if ((this->acc_bytes += size) >= this->limit)
        this->do_gc (interp);
    }

  /* Sweep the IDX'th white list: objects carrying OUT's generation bit
     are promoted into OUT's black set; everything else is finalized
     and freed, with its size subtracted from the accumulator. */
  void sweep_varobjs (int idx, gcinfo& out)
    {
      for (auto it = this->white.sets[idx].iterator (&varobj::gc_link);
          it.valid (); )
        {
          auto curr = it++;
          varobj *vp = &*curr;
          curr.link()->del ();

          if (kp_unlikely (vp->flagged_p (out.gcbit)))
            { /* This object survived even though it's not in the black set,
               * most likely because of a write barrier. */
              vp->vo_full = (vp->vo_full & ~GC_MASK) | out.gcbit;
              out.black.add_obj (vp);
            }
          else
            {
              out.acc_bytes -= vp->vo_size;
              fini_varobj (vp);
            }
        }
    }

  void sweep_varobjs (gcinfo& out)
    {
      this->sweep_varobjs (0, out);
      this->sweep_varobjs (1, out);
    }

  /* Mark VP as live for this cycle.  Returns false if it already
     carried the current generation bit; otherwise stamps it and moves
     it (if linked in some set) into the black set. */
  bool set_black (varobj *vp)
    {
      if (vp->flagged_p (this->gcbit))
        return (false);

      vp->vo_full = (vp->vo_full & ~GC_MASK) | this->gcbit;
      if (vp->gc_link.linked_p ())
        {
          vp->gc_link.del ();
          this->black.add_obj (vp);
        }

      return (true);
    }

  // Drain the IDX'th gray list by marking each deferred object for real.
  void mark_gray (int idx)
    {
      for (auto it = this->gray.sets[idx].iterator (&varobj::gc_link);
          it.valid (); )
        {
          auto curr = it++;
          varobj *vp = &*curr;
          vp->vo_full &= ~FLAGS_GCWIP;
          this->mark (vp->as_obj ());
        }
    }

  void mark_gray ()
    {
      this->mark_gray (0);
      this->mark_gray (1);
    }

  void suspend_one (interpreter *interp, interpreter *target);
  void suspend_all (interpreter *interp);
  void resume_all (interpreter *interp);
  result<void> exit (interpreter *interp, object val);
};

/* Open-addressed hash set of "GC cards": old-generation parents that
   were stored a young reference by the write barrier.  UNBOUND marks an
   empty slot; probing is linear with power-of-two capacity. */
struct gc_cards
{
  object *table;
  object st_tab[16];   // Small inline table used until the first resize.
  uint32_t nelem;
  uint32_t nsize;

  void init ()
    {
      this->table = this->st_tab;
      this->nelem = 0;
      this->nsize = (uint32_t)KP_NELEM (this->st_tab);

      for (uint32_t i = 0; i < this->nsize; ++i)
        this->table[i] = UNBOUND;
    }

  /* Grow the table to twice its size and rehash every entry.
     NOTE(review): assumes alloc_array fills the new array with UNBOUND
     (the empty-slot sentinel) -- confirm against its implementation. */
  result<void> resize (interpreter *interp)
    {
      uint32_t nsz = this->nsize * 2;
      object ax = KP_TRY (alloc_array (interp, nsz));
      array *np = as_array (ax);

      for (uint32_t i = 0, j = 0; j < this->nelem; ++i)
        {
          object obj = this->table[i];
          if (obj == UNBOUND)
            continue;

          uint32_t idx = (uint32_t)((obj >> 3) & (nsz - 1));
          while (true)
            {
              /* Insert into the first EMPTY slot.  (The previous code
               * tested '!= UNBOUND', which overwrote occupied slots and
               * spun forever upon reaching a free one.) */
              if (np->data[idx] == UNBOUND)
                {
                  np->data[idx] = obj;
                  break;
                }

              idx = (idx + 1) & (nsz - 1);
            }

          ++j;
        }

      this->table = np->data;
      this->nsize = nsz;
      return (0);
    }

  // Insert OBJ unless already present; grow at 80% load factor.
  result<void> add_obj (interpreter *interp, object obj)
    {
      uint32_t idx = (uint32_t)((obj >> 3) & (this->nsize - 1));
      while (true)
        {
          if (this->table[idx] == obj)
            return (0);
          else if (this->table[idx] == UNBOUND)
            {
              this->table[idx] = obj;
              if (++this->nelem * 100 > this->nsize * 80)
                KP_VTRY (this->resize (interp));

              return (0);
            }

          idx = (idx + 1) & (this->nsize - 1);
        }
    }
};

/* Iterator that yields the same value forever; used to fill a freshly
   allocated cons list with a single fill object. */
struct cycle_alloc_iter
{
  object value;

  cycle_alloc_iter (object v) : value (v)
    {
    }

  // Post-increment is a no-op: the sequence never advances.
  cycle_alloc_iter& operator++ (int)
    {
      return (*this);
    }

  object operator* ()
    {
      return (this->value);
    }
};

/* Per-thread memory manager: this thread's cons pages and free list,
   its locally allocated variable-size objects, and its GC card set. */
struct memmgr
{
  conspage *pages;     // Chain of cons pages; head is the active page.
  object free_list;    // Recycled conses, chained through their cdrs.
  uint32_t nfree;      // Number of conses on the free list.
  uint32_t top_used;   // Conses already consumed from the head page.
  varobj_set vobjs;    // Varobjs allocated by this thread.
  size_t vo_size;      // Total bytes held in 'vobjs'.
  uint32_t vo_nelem;   // Number of objects in 'vobjs'.
  gc_cards cards;      // Old-to-young references (write barrier).
  memmgr *next;        // Link in the global free-managers list.

  void init ()
    {
      this->pages = nullptr;
      this->free_list = NIL;
      this->nfree = this->vo_nelem = 0;
      // Start past any page capacity so the first allocation is forced
      // to fetch a fresh cons page.
      this->top_used = 0xffff;
      this->vo_size = 0;
      this->vobjs.reset ();
      this->cards.init ();
    }

  // Take a page from the global cache and push it onto our chain.
  // Caller must already hold the GC lock.
  conspage* fetch_conspage (gcinfo& gc)
    {
      conspage *ret = gc.alloc_conspage ();
      ret->next = this->pages;
      this->pages = ret;

      return (ret);
    }

  // Locked variant of the above; also charges the page against the
  // allocation accumulator and resets the head-page cursor.
  result<conspage*> alloc_conspage (interpreter *interp, gcinfo& gc)
    {
      conspage *ret;

      {
        KP_MT_BEGIN (lwlock_grab_nointr (interp, &gc.xlock));
        ret = gc.alloc_conspage ();
        gc.update_alloc (interp, gc.pagesize);
        KP_MT_END (lwlock_drop (&gc.xlock));
      }

      ret->next = this->pages;
      this->pages = ret;
      this->top_used = 0;

      return (ret);
    }

  // Return a dead cons to the free list (car = UNBOUND marks it free).
  void push_cons (cons *ptr, gcinfo& gc)
    {
      ptr->car = UNBOUND;
      ptr->cdr = this->free_list;
      this->free_list = ptr->as_obj ();
      ++this->nfree;
      gc.acc_bytes -= sizeof (*ptr);
    }

  // Pop one cons off the free list; caller must check nfree first.
  cons* pop_cons ()
    {
      cons *retp = as_cons (this->free_list);
      this->free_list = retp->cdr;
      return (retp);
    }

  // Allocate a single cons, preferring the free list over page space.
  // The new cell is published in interp->alval to keep it GC-visible.
  result<object> alloc_cons1 (interpreter *interp, gcinfo& gc)
    {
      cons *ret;

      if (this->nfree != 0)
        { // Use the free list.
          ret = this->pop_cons ();
          --this->nfree;
        }
      else
        {
          if (this->top_used >= gc.conses_per_page)
            { // Top page is exhausted - Allocate a new one.
              KP_VTRY (this->alloc_conspage (interp, gc));
            }

          ret = this->pages->at (this->top_used);
          ++this->top_used;
        }

      interp->alval = ret->as_obj ();
      ret->car = ret->cdr = NIL;
      return (interp->alval);
    }

  /* Allocate a NIL-terminated list of N conses whose cars are drawn
     from IT, storing the address of the final cdr in *TAIL.  Draws from
     the free list first, then the head page, then whole new pages. */
  template <typename Iter>
  object alloc_ncons (interpreter *interp, gcinfo& gc, uint32_t n,
                      Iter it, object **tail)
    {
      object *outp = &interp->alval;
      uint32_t i;

      *outp = NIL;

      // Consume the free list before using the pages.
      for (i = 0; i < min (this->nfree, n); ++i)
        {
          cons *tmp = this->pop_cons ();
          tmp->car = *it++;
          *outp = tmp->as_obj (), outp = &tmp->cdr;
        }

      this->nfree -= i;
      if ((n -= i) == 0)
        {
          *(*tail = outp) = NIL;
          return (interp->alval);
        }

      cons *ptr = this->pages->at (this->top_used);
      auto opp = gc.conses_per_page;
      int avail = (int)opp - (int)this->top_used;

      if (avail >= (int)n)
        { // Top page has enough conses to satisfy the request.
          *outp = link_conses (ptr, n, it);
          this->top_used += n;
        }
      else
        {
          uint32_t n_pages = 1;

          if (avail > 0)
            { // Consume the top page before allocating more.
              *outp = link_conses (ptr, avail, it);
              outp = &ptr[avail - 1].cdr;
              n -= avail;
            }

          // Fill as many whole pages as needed under the GC lock.
          KP_MT_BEGIN (lwlock_grab_nointr (interp, &gc.xlock));
          for (; n > opp; n -= opp, ++n_pages)
            {
              ptr = this->fetch_conspage(gc)->at (0);
              *outp = link_conses (ptr, opp, it);
              outp = &ptr[opp - 1].cdr;
            }

          ptr = this->fetch_conspage(gc)->at (0);
          this->top_used = n;
          *outp = link_conses (ptr, n, it);
          gc.update_alloc (interp, n_pages * gc.pagesize);
          KP_MT_END (lwlock_drop (&gc.xlock));
        }

      *tail = &ptr[n - 1].cdr;
      return (interp->alval);
    }

  // Hand all local varobjs to the collector's white set (GC lock held).
  void reset_varobjs (gcinfo& gc)
    {
      gc.white.concat (&this->vobjs);
      gc.acc_bytes += this->vo_size;
      this->vo_size = this->vo_nelem = 0;
    }

  // Locked variant; charging the bytes may itself trigger a collection.
  void flush_varobjs (interpreter *interp, gcinfo& gc)
    {
      auto saved = this->vo_size;

      KP_MT_BEGIN (gc.lock (interp));
      gc.white.concat (&this->vobjs);
      this->vo_size = 0;
      gc.update_alloc (interp, saved);
      KP_MT_END (gc.unlock ());

      this->vo_nelem = 0;
    }

  // Track a freshly allocated varobj; flush to the collector once the
  // local size/count limits are exceeded.
  void add_varobj (interpreter *interp, gcinfo& gc,
                          varobj *ptr, size_t bytes)
    {
      this->vobjs.add_obj (ptr);
      ptr->vo_size = (uint32_t)bytes;
      this->vo_size += bytes;
      ++this->vo_nelem;

      if (this->vo_size > gc.vo_size_limit ||
          this->vo_nelem > gc.vo_nelem_limit)
        this->flush_varobjs (interp, gc);
    }

  // Clear every page's mark bitmap (done before a full collection).
  void clear_bitmaps (uint32_t opp)
    {
      for (conspage *p = this->pages; p != nullptr; p = p->next)
        memset (p->bitmap (), 0, (opp + 7) / 8);
    }

  // Return every live-but-unmarked cons in PAGE to the free list.
  void sweep_cons_page (gcinfo& gc, conspage *page, uint32_t n)
    {
      uintptr_t *bp = page->bitmap ();
      for (uint32_t i = 0; i < n; ++i)
        {
          cons *ptr = page->at (i);
          if (ptr->car != UNBOUND &&
              (bp[i / BM_BITS] & ((uintptr_t)1 << (i % BM_BITS))) == 0)
            this->push_cons (ptr, gc);
        }
      }

  // Sweep every page; the head page only up to its used watermark.
  void sweep (gcinfo& gc)
    {
      if (!this->pages)
        return;

      auto opp = gc.conses_per_page;
      for (conspage *p = this->pages->next; p != nullptr; p = p->next)
        this->sweep_cons_page (gc, p, opp);

      this->sweep_cons_page (gc, this->pages, this->top_used);
    }

  void mark_cards (gcinfo& gc)
    {
      // Mark the GC cards, but not the containing vector itself.
      for (uint32_t i = 0; i < this->cards.nsize; ++i)
        gc.mark (this->cards.table[i]);

      // Done with the vector.
      this->cards.init ();
    }
};

static gcinfo KP_gc;

/* Obtain a memory manager for a new thread: pop one from the lock-free
   global free list if available, otherwise allocate a fresh one. */
memmgr* memmgr_alloc ()
{
  while (true)
    {
      memmgr *mp = KP_gc.free_mgrs;
      if (!mp)
        break;
      else if (atomic_cas_bool ((atomic_t *)&KP_gc.free_mgrs,
                                (atomic_t)mp, (atomic_t)mp->next))
        return (mp);

      // CAS lost a race; back off briefly and retry.
      atomic_spin_nop ();
    }

  memmgr *ret = (memmgr *)xmalloc (sizeof (*ret));
  ret->init ();

  return (ret);
}

// Push MP onto the lock-free global list of reusable memory managers.
void memmgr_free (memmgr *mp)
{
  while (true)
    {
      memmgr *tp = KP_gc.free_mgrs;
      mp->next = tp;
      if (atomic_cas_bool ((atomic_t *)&KP_gc.free_mgrs,
                           (atomic_t)tp, (atomic_t)mp))
        return;

      // CAS lost a race; back off briefly and retry.
      atomic_spin_nop ();
    }
}

// The mark bitmap lives immediately after the last cons in the page.
uintptr_t* conspage::bitmap () const
{
  return ((uintptr_t *)this->at (KP_gc.conses_per_page));
}

// Pages are page-size aligned, so masking off the low bits of a cons'
// address recovers its containing page.
conspage* conspage::from_cons (const cons *ptr)
{
  return ((conspage *)((uintptr_t)ptr & ~(KP_gc.pagesize - 1)));
}

// Acquire the global GC lock on behalf of INTERP.
void gc_lock (interpreter *interp)
{
  KP_gc.lock (interp);
}

// Release the global GC lock.
void gc_unlock ()
{
  KP_gc.unlock ();
}

// Register a freshly allocated varobj of BYTES bytes with the calling
// interpreter's memory manager.
void gc_register (interpreter *interp, varobj *ptr, size_t bytes)
{
  interp->mmgr->add_varobj (interp, KP_gc, ptr, bytes);
}

// Allocate a single cons (car and cdr initialized to NIL).
result<object> alloc_cons (interpreter *interp)
{
  auto eg = KP_TRY (evh_guard::make (interp));
  return (interp->mmgr->alloc_cons1 (interp, KP_gc));
}

/* Allocate a list of N conses, each car set to FILL.  If TAIL is
   non-null, *TAIL receives the address of the last cdr. */
result<object> alloc_cons (interpreter *interp, uint32_t n,
                           object fill, object **tail)
{
  cycle_alloc_iter cit (fill);
  object *dummy;

  if (!tail)
    tail = &dummy;

  auto eg = KP_TRY (evh_guard::make (interp));
  return (interp->mmgr->alloc_ncons (interp, KP_gc, n, cit, tail));
}

/* Allocate a list of N conses whose cars are taken from ARGV.  If TAIL
   is non-null, *TAIL receives the address of the last cdr. */
result<object> alloc_cons (interpreter *interp, uint32_t n,
                           object *argv, object **tail)
{
  object *dummy;
  if (!tail)
    tail = &dummy;

  auto eg = KP_TRY (evh_guard::make (interp));
  return (interp->mmgr->alloc_ncons (interp, KP_gc, n, argv, tail));
}

/* Event handler run by a thread when the collector asks it to stop:
   publish memory state and block on the GC event until resumed. */
result<object> gcreq_handler (interpreter *interp, object *, int)
{
  // Flush memory state and acknowledge request.
  evh_safeguard eg { interp };
  atomic_mfence ();
  interp->sync_ev()->wait (interp);
  kp_return (fixint (0));
}

// Ask TARGET (some other interpreter thread) to stop for collection.
void gcinfo::suspend_one (interpreter *interp, interpreter *target)
{
  if (target == interp)
    return;   // Never suspend the collecting thread itself.

  /* Save the state and notify the thread of the suspension request,
   * if it's running. Otherwise, the thread will have to wait on the
   * GC event once it's done blocking. */

  target->lock_remote (interp);
  target->saved_state = target->state;
  target->state = INTERP_SUSPENDED;
  target->sync_ev() = &this->event;

  if (target->saved_state == INTERP_RUNNING)
    {
      // Only running threads must acknowledge; count them so
      // suspend_all knows how many to wait for.
      target->set_ev (GCREQ_EV);
      ++this->n_suspended;
    }

  target->unlock ();
}

// Return an iterator over the global list of live threads.
static inline dlist::iter_type<thread> threads_iter ()
{
  return (all_threads.iterator (&thread::thr_link));
}

// Restore every suspended thread's state and wake them all.
void gcinfo::resume_all (interpreter *interp)
{
  for (auto it = threads_iter (); it.valid (); ++it)
    {
      interpreter *target = it->interp;
      if (target == interp)
        continue;

      // Restore interpreter state.
      target->lock_remote (interp);
      target->state = target->saved_state;
      target->unlock ();
    }

  /* NOTE(review): all_threads_lock is released here, but the matching
   * acquire is not visible in this function, and suspend_all already
   * drops it -- confirm the lock protocol (possible double release). */
  lwlock_drop (&all_threads_lock);
  // Wake all threads.
  this->event.wake_all (interp);
}

// Marking routines.

/* RAII helper: bumps the pointed-to recursion-depth counter on
   construction and restores it on scope exit. */
struct depth_guard
{
  uint32_t *counter;

  depth_guard (uint32_t *p) : counter (p)
    {
      ++*this->counter;
    }

  ~depth_guard ()
    {
      --*this->counter;
    }
};

/* Follow cdr links to the end of a cons chain and report whether its
   tail is the UNBOUND marker (i.e. a link-reversed spine). */
static inline bool
ub_tail_p (object obj)
{
  for (; cons_p (obj); obj = xcdr (obj)) ;
  return (obj == UNBOUND);
}

/* Helper for mark_cons' pointer-reversal walk: descend CNT levels of
   car links from OBJ and report whether the walk ends in a cons whose
   car is an UNBOUND-terminated chain.
   NOTE(review): presumably this detects that the reversed spine has
   been fully consumed so the walk must ascend -- confirm against the
   traversal invariants in mark_cons. */
static inline bool
left_tail_p (object obj, uint32_t cnt)
{
  if (cnt-- == 0)
    return (false);

  for (; cnt != 0; --cnt)
    {
      obj = xcar (obj);
      if (!cons_p (obj) || !cons_p (obj = xcdr (obj)))
        return (false);
    }

  return (xcons_p (obj) && ub_tail_p (xcar (obj)));
}

/* Mark the graph of cons cells reachable from OBJ without using the
   machine stack: car/cdr links are temporarily reversed while moving
   forward (PRED holds the back-chain, UNBOUND terminates it) and
   restored while moving back, Schorr-Waite style.  Non-cons leaves are
   handed to mark().  Returns immediately if OBJ is already marked. */
void gcinfo::mark_cons (object obj, uint32_t depth)
{
  if (!as_cons(obj)->gc_mark ())
    return;

  object curr = obj, pred = UNBOUND;
  bool fwd = true;     // Moving forward along cdrs vs. backing up.
  uint32_t cnt = 0;    // Nesting level of car-descents taken so far.

  while (true)
    {
      if (fwd)
        { // Move on to the CDR.
          object tmp = xcdr (curr);

          if (cons_p (tmp) && as_cons(tmp)->gc_mark ())
            {
              // Reverse the link so we can find our way back.
              xcdr(curr) = pred;
              pred = curr, curr = tmp;
            }
          else
            { // Reached the end - Move left.
              if (!xcons_p (tmp))
                this->mark (tmp, depth);

              swap (curr, pred);
              fwd = false;
            }
        }
      else
        {
          object tmp = xcar (pred);
          if (!cons_p (tmp))
            this->mark (tmp, depth);
          else if (as_cons(tmp)->gc_mark ())
            { // Found a sub-cons - Descend and move right.
              xcar(pred) = curr;
              curr = tmp;
              fwd = true;
              ++cnt;
              continue;
            }

          as_cons(pred)->gc_mark ();
          if (curr == UNBOUND)
            goto end;

          tmp = xcdr (curr);
          if ((!cons_p (tmp) && tmp != UNBOUND) ||
              left_tail_p (curr, cnt))
            { // Reached the end - Ascend and continue left.
              tmp = xcar (curr);
              xcar(curr) = pred;
              pred = curr, curr = tmp;
              --cnt;
              continue;
            }

          // Restore the reversed cdr link and keep backing up.
          xcdr(curr) = pred;
          pred = curr, curr = tmp;

          if (curr == UNBOUND)
            {
            end:
              tmp = xcar (pred);
              if (!cons_p (tmp))
                {
                  this->mark (tmp, depth);
                  break;
                }
              else if (!as_cons(tmp)->gc_mark ())
                break;
              else
                { // Recurse on innermost cons.
                  curr = tmp;
                  pred = UNBOUND;
                  fwd = true;
                }
            }
        }
    }
}

// Mark a package: its name, search path, and the bound symbols in its
// symbol table (unbound temporaries are deliberately skipped).
void gcinfo::mark_pkg (package *pkgp, uint32_t depth)
{
  this->set_black (as_varobj (pkgp->name));
  this->mark (pkgp->path);

  /* For a package's symbol table, we don't want to mark temporary
   * symbols such as those produced by 'let' and function bindings,
   * since they are generally garbage. Therefore, we inline the
   * marking routine here. */

  array *syms = as_array (pkgp->syms);

  if (!this->set_black (syms))
    return;

  for (uint32_t i = 0; i < syms->len; ++i)
    {
      object s = syms->data[i];
      if (symbol_p (s) && symval (s) != UNBOUND)
        this->mark (s, depth);
    }
}

/* Return whether type code TP denotes a composite object, i.e. one
   whose children may be deferred to the gray set during deep marking. */
static inline bool
composite_type_p (int tp)
{
  return (tp == typecode::ARRAY || tp == typecode::TABLE ||
          tp == typecode::TUPLE || tp == typecode::CUSTOM);
}

/* Visitor adapter: lets custom objects report their references to the
   collector, preserving the current marking depth. */
struct gc_visitor : public visitor
{
  uint32_t depth;
  gcinfo* pgc;

  gc_visitor (uint32_t d) : depth (d), pgc (&KP_gc)
    {
    }

  void operator() (interpreter *, object obj)
    {
      this->pgc->mark (obj, this->depth);
    }
};

/* Mark OBJ and everything reachable from it.  Immediates are ignored;
   conses go through the non-recursive mark_cons; composite objects that
   are too deep are parked in the gray set for later draining.  Types
   not listed in the switch carry no object references and are just
   blackened. */
void gcinfo::mark (object obj, uint32_t depth)
{
  // Unmask the extra bit and check for common values.
  if (obj == result<object>::ERROR)
    return;

  obj &= ~EXTRA_BIT;
  if (obj == NIL || obj == UNBOUND)
    return;

  int tp = itype (obj);

  if (tp == typecode::INT || tp == typecode::CHAR)
    return;
  else if (tp == typecode::CONS)
    {
      this->mark_cons (obj, depth);
      return;
    }

  depth_guard g { &depth };
  varobj *vp = as_varobj (obj);
  if (depth > MAX_MARK_DEPTH && composite_type_p (tp))
    {
      // Too deep: defer the subtree instead of recursing further.
      if (vp->flagged_p (this->gcbit | FLAGS_GCWIP))
        return;
      else if (vp->gc_link.linked_p ())
        { // Not a local varobject.
          vp->vo_full |= FLAGS_GCWIP;
          vp->gc_link.del ();
          this->gray.add_obj (vp);
          return;
        }
    }
  else if (!this->set_black (vp))
    return;   // Already marked this cycle.

  // Mark each type's embedded references.
  switch (tp)
    {
      case typecode::ARRAY:
        {
          array *ap = (array *)vp;
          for (uint32_t i = 0; i < ap->len; ++i)
            this->mark (ap->data[i], depth);

          break;
        }

      case typecode::TABLE:
        {
          table *tp = (table *)vp;
          this->mark (tp->vector, depth);
          this->mark (tp->cmpfct, depth);
          this->mark (tp->hashfct, depth);
          break;
        }

      case typecode::TUPLE:
        {
          tuple *tp = (tuple *)vp;
          this->mark (tp->head, depth);
          this->mark (tp->test, depth);
          break;
        }

      case typecode::STREAM:
        {
          stream *strmp = (stream *)vp;
          this->mark (strmp->bvec);
          this->mark (strmp->ilock);
          this->mark (strmp->pos.state);
          this->mark (strmp->extra, depth);
          break;
        }

      case typecode::SYMBOL:
        {
          symbol *sp = (symbol *)vp;
          this->set_black (as_varobj (sp->name));
          this->mark (sp->value, depth);
          this->mark (sp->pkg, depth);
          break;
        }

      case typecode::PKG:
        this->mark_pkg ((package *)vp, depth);
        break;

      case typecode::FCT:
        {
          this->mark (fct_name (obj));

          // Native functions carry no further markable state.
          if (vp->flagged_p (function_base::native_flag))
            return;

          function *fp = (function *)vp;
          this->set_black (as_varobj (fp->bcode));
          this->mark (fp->vals, depth);
          this->mark (fp->env, depth);
          break;
        }

      case typecode::CORO:
        {
          coroutine *crp = (coroutine *)vp;
          this->mark (crp->value, depth);
          this->mark (crp->argv, depth);
          this->mark (crp->dbinds, depth);
          break;
        }

      case typecode::THREAD:
        this->mark (((thread *)vp)->retval, depth);
        break;

      case typecode::INSTANCE:
        {
          auto instp = (instance *)vp;
          this->mark (instp->ptype, depth);
          this->mark (instp->slots, depth);
          this->mark (instp->tspec, depth);
          this->mark (instp->builtin, depth);
          break;
        }

      case typecode::CUSTOM:
        {
          // Custom objects enumerate their references themselves.
          gc_visitor v { depth };
          ((custom_base *)vp)->visit (nullptr, v);
          break;
        }
    }
}

/* Locate CNP's bit in its page's mark bitmap: returns the word holding
   it and stores the in-word bit mask in MASK. */
static uintptr_t*
cons_gc_data (const cons *cnp, uintptr_t& mask)
{
  conspage *page = conspage::from_cons (cnp);
  uintptr_t *bp = page->bitmap ();
  uint32_t idx = (uint32_t)(cnp - page->at (0));

  mask = (uintptr_t)1 << (idx % BM_BITS);
  return (bp + idx / BM_BITS);
}

// Set this cons' mark bit; returns false if it was already set.
bool cons::gc_mark ()
{
  uintptr_t mask, *bp = cons_gc_data (this, mask);

  if (*bp & mask)
    return (false);

  *bp |= mask;
  return (true);
}

// A cons belongs to the old generation iff its page bitmap bit is set.
bool cons::old_gen_p () const
{
  uintptr_t mask, *bp = cons_gc_data (this, mask);
  return ((*bp & mask) != 0);
}

/* Stop the world: ask every other thread to suspend (clearing cons
   bitmaps when a full GC is pending) and wait until all of the running
   ones have acknowledged the request. */
void gcinfo::suspend_all (interpreter *interp)
{
  this->n_suspended = 0;
  lwlock_grab_nointr (interp, &all_threads_lock);
  for (auto it = threads_iter (); it.valid (); ++it)
    {
      interpreter *ip2 = it->interp;
      this->suspend_one (interp, ip2);
      if (this->full_gc)
        ip2->mmgr->clear_bitmaps (this->conses_per_page);
    }
  lwlock_drop (&all_threads_lock);

  auto& ev = this->event;
  lwlock_grab_nointr (interp, &ev.lock);

  /* If we got here before the suspended threads acknowledged
   * the request, wait for them now. */
  if (ev.n_waiters != this->n_suspended)
    for (ev.limit = this->n_suspended; ev.limit; )
      ev.cv.wait_nointr (interp, &ev.lock);

  lwlock_drop (&ev.lock);
}

/* Run a collection cycle: stop the world, mark all roots (registers,
   stacks, dynamic bindings, globals, cards), drain the gray set, sweep,
   decide whether the next cycle must be a full one, and resume.
   Caller holds the GC lock. */
void gcinfo::do_gc (interpreter *interp)
{
  if (!this->enabled)
    return;
  else if (this->full_gc)
    {
      // Full GC: demote every black object back to white and flip the
      // generation bit so prior marks are invalidated.
      this->white.concat (&this->black);
      this->black.reset ();
      /* NOTE(review): GC_MASK includes FLAGS_GCDIRTY, so this toggles
       * gcbit between FLAGS_GCBIT1 and FLAGS_GCBIT2|FLAGS_GCDIRTY
       * rather than only swapping the two generation bits -- confirm
       * this interaction with the write barrier is intended. */
      this->gcbit ^= GC_MASK;
    }

  this->event.reset ();
  this->suspend_all (interp);

  for (auto it = threads_iter (); it.valid (); ++it)
    {
      thread& thr = *it;
      interpreter *ip2 = thr.interp;

      auto mmgr = ip2->mmgr;
      mmgr->reset_varobjs (*this);
      mmgr->mark_cards (*this);

      // Mark the interpreter's registers.
      this->mark (ip2->thread);
      this->mark (ip2->last_err);
      this->mark (ip2->last_tb);
      this->mark (ip2->retval);
      this->mark (ip2->alval);
      this->mark (ip2->aux);

      if (ip2->xpkg != root_package)
        this->mark (ip2->xpkg);

      // Mark the interpreter's stack.
      this->set_black (as_varobj (ip2->stkobj));
      for (object *p = ip2->stack; p != ip2->stkend; ++p)
        this->mark (*p);

      // Mark the interpreter's dynamic bindings.
      for (uintptr_t i = 0; i < ip2->n_tlsyms; ++i)
        this->mark (ip2->tl_syms[i]);

      // Mark the interpreter's temporary values.
      for (auto i = ip2->values.iterator (&valref::link); i.valid (); ++i)
        this->mark (i->value);
    }

  /* Also mark the GC cards of any thread that may have exited,
   * and its memory manager was not reaped. */
  for (auto mp = this->free_mgrs; mp; mp = mp->next)
    mp->mark_cards (*this);

  // Mark the builtins.
  for (auto sp = symbol::fast_global_syms; *sp != UNBOUND; ++sp)
    this->mark (*sp);

  // Mark the global symbols.
  this->mark_pkg (as_package (root_package), 0);

  /* For the keyword package, we only mark the package object and
   * the symbol table. The reason being, keywords will always survive,
   * since their symbol value is aliased to themselves. */
  {
    package *kwp = as_package (kword_package);
    this->set_black (as_varobj (kwp->name));
    this->set_black (kwp);
    this->set_black (as_array (kwp->syms));
  }

  while (!this->gray.empty ())
    this->mark_gray ();

  // Now the mark phase is over. Sweep the garbage.
  size_t prev = this->acc_bytes;
  for (auto it = threads_iter (); it.valid (); ++it)
    it->interp->mmgr->sweep (*this);

  this->sweep_varobjs (*this);

  if (this->full_gc)
    this->full_gc = false;
  else if (this->acc_bytes >= prev / 3)
    /* If we didn't manage to reclaim at least 2/3's of the
     * total memory, perform a full collection next time. */
    this->full_gc = true;

  // Reset the allocated bytes accumulator.
  this->acc_bytes = 0;

  // GC done. Resume the world.
  this->resume_all (interp);
}

/* Terminate the process with VAL as status: suspend every other thread,
   collect their varobjs, then exit.  A non-fixint VAL is printed to the
   output stream and the status forced to 1.  Never returns. */
result<void> gcinfo::exit (interpreter *interp, object val)
{
  auto g = KP_TRY (evh_guard::make (interp));

  if (!singlethr_p ())
    {
      this->lock (interp);
      lwlock_grab_nointr (interp, &all_threads_lock);

      for (auto it = threads_iter (); it.valid (); ++it)
        {
          interpreter *ip2 = it->interp;
          this->suspend_one (interp, ip2);
          ip2->mmgr->reset_varobjs (*this);
        }

      this->unlock ();
    }

  int status;

  if (fixint_p (val))
    status = as_int (val);
  else
    {
      KP_TRY (xwrite (interp, as_stream (out_stream), val));
      status = 1;
    }

  ::exit (status);
  return (0);   // NOTREACHED.
}

// Public entry point: run a collection now, forcing a full one if FULL.
result<void> gc (bool full)
{
  interpreter *interp = interpreter::self ();
  auto g = KP_TRY (evh_guard::make (interp));

  KP_MT_BEGIN (KP_gc.lock (interp));
  KP_gc.full_gc = KP_gc.full_gc || full;
  KP_gc.do_gc (interp);
  KP_MT_END (KP_gc.unlock ());
  return (0);
}

/* Test whether OBJ already belongs to the old generation: a varobj
   carrying the current generation bit, or a cons whose page-bitmap
   bit is set. */
static inline bool
oldgen_p (object obj, uintptr_t bit)
{
  if (varobj_p (obj) && as_varobj(obj)->flagged_p (bit))
    return (true);

  return (xcons_p (obj) && as_cons(obj)->old_gen_p ());
}

/* Generational write barrier, called after storing OBJ into PARENT.
   Records old-generation parents that now reference a young object so
   a minor collection can still reach it. */
result<void> gc_wbarrier (interpreter *interp, object parent, object obj)
{
  auto bit = KP_gc.gcbit;

  // Nothing to do for immediates, during a pending full GC, when the
  // child is already old, or when the parent is still young.
  if (immediate_p (obj) || obj == NIL || KP_gc.full_gc ||
      oldgen_p (obj, bit) || !oldgen_p (parent, bit))
    return (0);
  else if (!xcons_p (parent))
    {
      varobj *vp = as_varobj (parent);
      if (vp->cas_flag (bit, FLAGS_GCDIRTY))
        { /* This object is in the black set. Unlinking it should be safe,
           * even in the presence of multiple threads. */
          vp->gc_link.del ();
          interp->mmgr->vobjs.add_obj (vp);
        }

      return (0);
    }

  switch (itype (obj))
    {
      case typecode::BVECTOR:
      case typecode::STR:
      case typecode::BIGINT:
      case typecode::FLOAT:
      case typecode::BIGFLOAT:
        /* Avoid potentially expensive memory allocations when a single
         * atomic flag setting can do the job. */
        as_varobj(obj)->set_flag (bit);
        return (0);
      default:
        return (interp->mmgr->cards.add_obj (interp, parent));
    }
}

// Run every pending finalizer on the given list of finalizable objects.
static void
call_finalizers (dlist *lstp)
{
  for (auto it = lstp->iterator (&finobj::gc_link); it.valid (); ++it)
    {
      finobj *fp = (finobj *)&*it;
      if (fp->fini)
        fp->fini (fp);
    }
}

// Initialize the global collector state; called once at startup.
bool memory_init ()
{
  return (KP_gc.init ());
}

// Mutable reference to the collector's enable flag.
bool& gc_status ()
{
  return (KP_gc.enabled);
}

// Builtin 'exit': terminate the process with the given (or zero) status.
// KP_gc.exit never returns normally.
result<object> exit_fct (interpreter *interp, object *argv, int argc)
{
  KP_TRY (KP_gc.exit (interp, argc ? *argv : fixint (0)));
  return (fixint (0));
}

/* Process-shutdown hook: flush the standard streams, then run every
   pending finalizer (per-thread and global).
   NOTE(review): the KP_MT_BEGIN below has no matching KP_MT_END, so the
   GC lock is never released -- presumably acceptable because the
   process is exiting; confirm. */
void memory_exit ()
{
  interpreter *interp = interpreter::self ();
  evh_safeguard eg { interp };

  deref (as_stream(out_stream)->flush (interp));
  deref (as_stream(err_stream)->flush (interp));

  KP_MT_BEGIN (KP_gc.lock (interp));
  for (auto it = threads_iter (); it.valid (); ++it)
    call_finalizers (&it->interp->mmgr->vobjs.sets[FINOBJ_IDX]);

  call_finalizers (&KP_gc.black.sets[FINOBJ_IDX]);
  call_finalizers (&KP_gc.white.sets[FINOBJ_IDX]);
}

KP_DECLS_END

