/* Definitions for the tuple type.

   This file is part of khipu.

   khipu is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

#include "khipu.hpp"
#include "utils/lazy.hpp"
#include <new>

KP_DECLS_BEGIN

// Maximum number of skiplist levels a node may occupy.
static const int MAX_DEPTH = 24;

/* Pick the number of levels for a new node: a geometric-ish random value
   (count of trailing zeros of a random word, scaled by 2/3), then raise
   the tuple's hi-water mark by at most one step if the chosen value
   exceeds it.  */
static int
rand_levels (interpreter *interp, tuple *tp)
{
  atomic_t lvl = ctz (interp->xrand ()) * 2 / 3;
  if (lvl == 0)
    return (1);

  while (true)
    {
      atomic_t prev = tp->hi_water;
      if (lvl <= prev)
        return (lvl);
      else if (prev == MAX_DEPTH ||
               atomic_cas_bool (&tp->hi_water, prev, prev + 1))
        // Either the list is already at maximum depth, or we won the race
        // to raise the watermark.  NOTE(review): after a successful CAS the
        // watermark is prev + 1, yet prev is returned -- presumably so no
        // node uses a level before it is published; confirm.
        return (prev);

      atomic_spin_nop ();
    }
}

// Slots that precede the per-level links in a node's array (the key slot).
static const int NODE_OVERHEAD = 1;

/* Length of a node's backing array; used as an upper bound on its levels
   (see tuple_pop_mt).  */
static inline int
node_nlvl (object obj)
{
  int nslots = len_a (obj);
  return (nslots);
}

/* Accessor for a node's key, stored in slot 0 of its backing array.  */
static inline object&
node_key (object obj)
{
  object& slot = xaref (obj, 0);
  return (slot);
}

/* Allocate a skiplist node with NLVL levels: a zero-filled array holding
   KEY followed by NLVL next pointers.  */
static result<object>
make_node (interpreter *interp, int nlvl, object key)
{
  int nslots = NODE_OVERHEAD + nlvl;
  object ret = KP_TRY (alloc_array (interp, nslots, fixint (0)));
  node_key (ret) = key;
  return (ret);
}

/* Address of the tuple's element counter.  make_root_node advances the
   root array's data pointer by one slot, so index -1 addresses the hidden
   first slot, which is reused as an atomic length counter.  */
static inline atomic_t*
root_plen (object obj)
{
  return ((atomic_t *)&xaref(obj, -1));
}

/* Allocate the head (sentinel) node of a tuple.  It gets MAX_DEPTH + 1
   levels; its first array slot is hidden by bumping the data pointer and
   shrinking the visible length, and then serves as the element counter
   (see root_plen).  Its key is UNBOUND so it never matches a real key.  */
static result<object>
make_root_node (interpreter *interp)
{
  object ret = KP_TRY (make_node (interp, MAX_DEPTH + 1, fixint (0)));
  ++as_array(ret)->data, --as_array(ret)->len;   // Hide slot 0.
  *root_plen(ret) = 0;
  node_key(ret) = UNBOUND;
  return (ret);
}

/* Create a fresh, empty tuple ordered by TST (fixint (0), i.e. the
   built-in ordering, when TST is NIL).  The new object is stored in
   interp->alval before gc_register so it stays reachable while being
   registered, and is then returned.  */
result<object> alloc_tuple (interpreter *interp, object tst)
{
  auto eg = KP_TRY (evh_guard::make (interp));
  object head = KP_TRY (make_root_node (interp));
  tuple *tp = alloch<tuple> ();

  tp->head = head;
  tp->hi_water = 1;
  tp->test = tst == NIL ? fixint (0) : tst;

  interp->alval = tp->as_obj ();
  gc_register (interp, tp);
  return (interp->alval);
}

/* Accessor for a node's successor link at level LVL; the per-level links
   are stored right after the key slot.  */
static inline object&
node_next (object obj, int lvl)
{
  object& slot = xaref (obj, NODE_OVERHEAD + lvl);
  return (slot);
}

// Unlink policies for the 'find_preds_*' routines.
static const int UNLINK_FORCE = 0;    // Physically remove a matching node.
static const int UNLINK_ASSIST = 1;   // Help unlink marked nodes seen en route.
static const int UNLINK_NONE = 2;     // Read-only traversal.

/* Scratch state shared by the tuple operations.  The caller provides a
   stack-allocated SP array; the first four slots back the named
   references below, and optionally two runs of MAX_DEPTH slots cache the
   per-level predecessors and successors of a traversal.  The whole SP
   array is wrapped in a local array object and pinned in 'saved' so the
   garbage collector scans every slot.  */
struct tuple_args
{
  object& pred;     // Current predecessor during traversal.
  object& item;     // Current candidate node.
  object& next;     // Successor of 'item' at the current level.
  object& other;    // Auxiliary slot (CAS results, matched node).
  object *l_preds;  // Per-level predecessors, or nullptr if not requested.
  object *l_succs;  // Per-level successors, or nullptr if not requested.
  local_varobj<array> ars;   // Array view over SP, for GC visibility.
  valref saved;              // Roots 'ars' in the interpreter.

  /* N is the total slot count of SP; PR_P/SC_P request the predecessor
     and successor caches (SP must then be 4 + 2*MAX_DEPTH long).  */
  tuple_args (interpreter *interp, object *sp, int n, bool pr_p, bool sc_p) :
      pred (sp[0]), item (sp[1]), next (sp[2]),
      other (sp[3]), saved (interp)
    {
      this->ars.local_init (sp, n);
      *this->saved = this->ars.as_obj ();

      if (pr_p)
        {
          this->l_preds = sp + 4;
          for (object *p = this->l_preds; p < this->l_preds + MAX_DEPTH; ++p)
            *p = fixint (0);
        }
      else
        this->l_preds = nullptr;

      if (sc_p)
        {
          this->l_succs = sp + 4 + MAX_DEPTH;
          for (object *p = this->l_succs; p < this->l_succs + MAX_DEPTH; ++p)
            *p = fixint (0);
        }
      else
        this->l_succs = nullptr;

      // Clear the named slots so the GC never sees garbage.
      this->pred = this->item = this->next = this->other = fixint (0);
    }
};

/* Compare K1 to K2 under TP's comparison function; returns a negative,
   zero or positive int.  With no custom test (fixint (0)) the generic
   xcmp ordering is used; otherwise the test is pushed and invoked with
   K1 and K2 as arguments and must return an integer (fixint or bigint).  */
static inline result<int>
tuple_cmp (interpreter *interp, const tuple *tp,
           object k1, object k2)
{
  // We only need to save K1, since K2 is the caller-provided key.
  valref tmp (interp, k1);
  if (tp->test == fixint (0))
    {
      int c = KP_TRY (xcmp (interp, k1, k2));
      return (c);
    }

  KP_VTRY (interp->growstk (3));
  *interp->stkend++ = tp->test;
  *interp->stkend++ = k1;
  *interp->stkend++ = k2;
  KP_VTRY (call_n (interp, 2));

  int ret;

  if (as<int> (interp->retval, ret))
    return (ret);
  else if (as<bigint> (interp->retval))
    // Reduce a bigint result to its sign.  NOTE(review): the negation
    // presumably compensates for cmp_iI's argument order (fixint vs
    // bigint) -- confirm against cmp_iI's declaration.
    return (-cmp_iI (interp, interp->retval, fixint (0)));

  return (interp->raise ("type-error",
                         "comparison function must return an integer"));
}

/* Single-threaded skiplist search.  Walks TP from the top level down
   looking for KEY.  Levels >= N with an empty chain are skipped.  When a
   node with an equal key is found and UNLINK is not UNLINK_NONE, the node
   is spliced out at every level it occupies.  On return the per-level
   predecessor/successor caches in AP (when present) are filled, and the
   matched node -- or fixint (0) when KEY is absent -- is returned.  */
static result<object>
find_preds_lk (interpreter *interp, tuple *tp,
               tuple_args& ap, int n, object key, int unlink)
{
  int d = -1;

  ap.pred = tp->head;
  ap.item = ap.other = fixint (0);

  for (atomic_t lvl = tp->hi_water - 1; lvl >= 0; --lvl)
    {
      if ((ap.next = node_next (ap.pred, lvl)) == fixint (0) && lvl >= n)
        continue;

      for (ap.item = ap.next; ap.item != fixint (0) ; )
        {
          ap.next = node_next (ap.item, lvl);
          d = KP_TRY (tuple_cmp (interp, tp, node_key (ap.item), key));

          if (d >= 0)
            { // First key on this level that is >= KEY.
              ap.other = ap.item;
              if (d == 0 && unlink != UNLINK_NONE)
                { // Exact match: splice it out of this level's chain.
                  node_next(ap.pred, lvl) = ap.next;
                  ap.item = ap.next;
                  ap.next = ap.item != fixint (0) ?
                    node_next (ap.item, lvl) : fixint (0);
                }

              break;
            }

          ap.pred = ap.item, ap.item = ap.next;
        }

      if (ap.l_preds != nullptr)
        ap.l_preds[lvl] = ap.pred, ap.l_succs[lvl] = ap.item;
    }

  return (d == 0 ? ap.other : fixint (0));
}

/* Single-threaded lookup: return the stored key equal to KEY, or UNBOUND
   when absent.  Nothing is unlinked.  */
static result<object>
tuple_get_lk (interpreter *interp, tuple *tp,
              tuple_args& ap, object key)
{
  object node = KP_TRY (find_preds_lk (interp, tp, ap, 0, key, UNLINK_NONE));
  if (node == fixint (0))
    return (UNBOUND);

  return (node_key (node));
}

/* No-op update visitor used by the plain insertion entry points: a tuple
   element is just its key, so there is nothing extra to store.  */
struct tuple_inserter
{
  int call (interpreter *, object&)
    {
      return (0);
    }
};

/* Single-threaded insert-or-update.  If KEY is already present, F.call is
   applied to the stored key and false is returned.  Otherwise a node with
   a random level count is allocated, F.call is applied to its key slot,
   the node is linked at each level, the element count is bumped, and true
   is returned.  */
template <typename Fn>
static result<bool> tuple_update_lk (interpreter *interp, tuple *tp,
                                     tuple_args& ap, object key, Fn& f)
{
  int n = rand_levels (interp, tp);
  object tmp = KP_TRY (find_preds_lk (interp, tp, ap, n, key, UNLINK_NONE));

  if (tmp != fixint (0))
    { // Already present: update in place via the visitor.
      KP_VTRY (f.call (interp, node_key (tmp)));
      return (false);
    }

  object nval = KP_TRY (make_node (interp, n, key));
  KP_VTRY (f.call (interp, node_key (nval)));

  // Set the new element's successor and link it into the tuple.
  for (int lvl = 0; lvl < n; ++lvl)
    {
      node_next(nval, lvl) = ap.l_succs[lvl];
      node_next(ap.l_preds[lvl], lvl) = nval;
    }

  ++*root_plen(tp->head);   // One more element.
  return (true);
}

/* Single-threaded insertion: an update with a no-op visitor.  */
static inline result<bool>
tuple_put_lk (interpreter *interp, tuple *tp,
              tuple_args& ap, object key)
{
  tuple_inserter nop;
  return (tuple_update_lk (interp, tp, ap, key, nop));
}

/* Single-threaded removal: unlink KEY at every level in one pass and
   return the stored key, or DFL when absent.  */
static result<object>
tuple_pop_lk (interpreter *interp, tuple *tp,
              tuple_args& ap, object key, object dfl)
{
  object node = KP_TRY (find_preds_lk (interp, tp, ap,
                                       tp->hi_water, key, UNLINK_FORCE));
  if (node != fixint (0))
    {
      --*root_plen(tp->head);
      return (node_key (node));
    }

  return (dfl);
}

/* Lock-free skiplist search (multi-threaded counterpart of
   find_preds_lk).  A node is logically deleted when its per-level links
   are tagged with EXTRA_BIT.  Depending on UNLINK, marked nodes are
   either skipped (UNLINK_NONE) or physically spliced out via CAS; when a
   CAS observes interference the entire search restarts recursively.
   OUTP, when non-null, receives the head node in effect when the search
   began -- presumably so counters are adjusted on the right root even if
   the tuple is concurrently replaced (see tuple_clr); confirm.
   Returns the node matching KEY, or fixint (0).  */
static result<object>
find_preds_mt (interpreter *interp, tuple *tp, tuple_args& ap,
               int n, object key, int unlink, object *outp = nullptr)
{
  int d = 0;

  ap.pred = tp->head;
  ap.item = fixint (0);

  if (outp)
    *outp = ap.pred;

  for (atomic_t lvl = tp->hi_water - 1; lvl >= 0; --lvl)
    {
      if ((ap.next = node_next (ap.pred, lvl)) == fixint (0) && lvl >= n)
        continue;
      else if (ap.next & EXTRA_BIT)
        // Predecessor was deleted under us - restart from the top.
        return (find_preds_mt (interp, tp, ap, n, key, unlink, outp));

      for (ap.item = ap.next; ap.item != fixint (0); )
        {
          ap.next = node_next (ap.item, lvl);
          while (ap.next & EXTRA_BIT)
            {
              if (unlink == UNLINK_NONE)
                { // Skip logically deleted elements.
                  if ((ap.item = ap.next & ~EXTRA_BIT) == fixint (0))
                    break;

                  ap.next = node_next (ap.item, lvl);
                }
              else
                { // Help splice the marked node out of this level.
                  ap.other = atomic_cas ((atomic_t *)&node_next(ap.pred, lvl),
                                         ap.item, ap.next & ~EXTRA_BIT);
                  if (ap.other == ap.item)
                    ap.item = ap.next & ~EXTRA_BIT;
                  else
                    {
                      if (ap.other & EXTRA_BIT)
                        // Predecessor got marked too - restart.
                        return (find_preds_mt (interp, tp, ap,
                                               n, key, unlink, outp));

                      ap.item = ap.other;
                    }

                  ap.next = ap.item != fixint (0) ?
                    node_next (ap.item, lvl) : fixint (0);
                }
            }

          if (ap.item == fixint (0))
            break;

          d = KP_TRY (tuple_cmp (interp, tp, node_key (ap.item), key));
          if (d > 0 || (d == 0 && unlink != UNLINK_FORCE))
            break;

          ap.pred = ap.item, ap.item = ap.next;
        }

      if (ap.l_preds != nullptr)
        ap.l_preds[lvl] = ap.pred, ap.l_succs[lvl] = ap.item;
    }

  return (d == 0 ? ap.item : fixint (0));
}

/* Multi-threaded lookup: return the stored key equal to KEY, or UNBOUND
   when absent.  No unlink assistance is performed.  */
static inline result<object>
tuple_get_mt (interpreter *interp, tuple *tp,
              tuple_args& ap, object key)
{
  object node = KP_TRY (find_preds_mt (interp, tp, ap, 0, key, UNLINK_NONE));
  if (node == fixint (0))
    return (UNBOUND);

  return (node_key (node));
}

/* Lock-free insert-or-update.  The new node is first linked at the bottom
   level with a CAS -- that is the linearization point that makes it part
   of the tuple -- then at the higher levels, re-resolving predecessors
   whenever a CAS loses a race.  If a concurrent removal marks the node
   while we are still linking it, we help unlink it and report failure.
   Returns true when a new element was inserted, false when KEY already
   existed (after applying F to the stored key) or when the concurrent
   removal above won.  */
template <typename Fn>
static result<bool> tuple_update_mt (interpreter *interp, tuple *tp,
                                     tuple_args& ap, object key, Fn& f)
{
  valref root (interp);
retry:
  int n = rand_levels (interp, tp);
  object tmp = KP_TRY (find_preds_mt (interp, tp, ap, n, key,
                                      UNLINK_ASSIST, &*root));
  if (tmp != fixint (0))
    {
      KP_VTRY (f.call (interp, node_key (tmp)));
      return (false);
    }

  object nval = KP_TRY (make_node (interp, n, key));
  KP_VTRY (f.call (interp, node_key (nval)));
  copy_objs (&node_next(nval, 0), ap.l_succs, n);
  ap.pred = *ap.l_preds;

  /* Try to link the new element bottom-up. If we manage to
   * do so, it becomes part of the tuple. */
  if (!atomic_cas_bool ((atomic_t *)&node_next(ap.pred, 0), *ap.l_succs, nval))
    // Another thread beat us - Retry.
    goto retry;

  for (int lvl = 1; lvl < n; ++lvl)
    while (true)
      {
        ap.pred = ap.l_preds[lvl];
        if (atomic_cas_bool ((atomic_t *)&node_next(ap.pred, lvl),
                             ap.l_succs[lvl], nval))
          // Successful link.
          break;

        // Find the new element's predecessors and successors.
        KP_TRY (find_preds_mt (interp, tp, ap, n, key, UNLINK_ASSIST));
        for (int ix = lvl; ix < n; ++ix)
          if ((ap.pred = node_next (nval, ix)) == ap.l_succs[ix])
            continue;
          else if (atomic_cas ((atomic_t *)&node_next(nval, ix),
                               ap.pred, ap.l_succs[ix]) & EXTRA_BIT)
            { // Another thread is removing this very key. Bail out.
              ap.l_preds = ap.l_succs = nullptr;
              KP_TRY (find_preds_mt (interp, tp, ap, 0, key, UNLINK_FORCE));
              return (false);
            }
      }

  /* If another thread is removing the new key just as we're adding it,
   * make sure it's been unlinked before returning. */
  if (node_next (nval, n - 1) & EXTRA_BIT)
    {
      ap.l_preds = ap.l_succs = nullptr;
      KP_VTRY (find_preds_mt (interp, tp, ap, 0, key, UNLINK_FORCE));
      return (false);
    }

  // Adjust the count on the root that was current when we started.
  atomic_add (root_plen (*root), 1);
  return (true);
}

/* Multi-threaded insertion: an update with a no-op visitor.  */
static inline result<bool>
tuple_put_mt (interpreter *interp, tuple *tp,
              tuple_args& ap, object key)
{
  tuple_inserter nop;
  return (tuple_update_mt (interp, tp, ap, key, nop));
}

/* Lock-free removal.  After locating KEY, mark the node's slots with
   EXTRA_BIT from the top level down; the thread that marks the last
   (index 0) slot owns the deletion -- observing an existing mark there
   means another thread won, and DFL is returned.  The node is then
   physically unlinked and the element count decremented.
   NOTE(review): the marking loop indexes raw array slots via xaref from
   node_nlvl () - 1 down to 0, which covers the key slot at index 0 as
   well as the links -- confirm this offset scheme is intentional.  */
static result<object>
tuple_pop_mt (interpreter *interp, tuple *tp,
              tuple_args& ap, object key, object dfl)
{
  valref root (interp);
  ap.item = KP_TRY (find_preds_mt (interp, tp, ap, tp->hi_water,
                                   key, UNLINK_ASSIST, &*root));

  if (ap.item == fixint (0))
    return (dfl);

  ap.other = fixint (0);
  for (int lvl = node_nlvl (ap.item) - 1; lvl >= 0; --lvl)
    {
      ap.other = xaref (ap.item, lvl);
      do
        {
          ap.next = ap.other;
          ap.other = atomic_cas ((atomic_t *)&xaref(ap.item, lvl),
            (atomic_t)ap.next, (atomic_t)ap.next | EXTRA_BIT);

          if (ap.other & EXTRA_BIT)
            { // Slot already marked by someone else.
              if (lvl == 0)
                return (dfl);
              break;
            }
        }
      while (ap.next != ap.other);
    }

  // Unlink the item.
  valref ret (interp, ap.item);
  KP_VTRY (find_preds_mt (interp, tp, ap, 0, key, UNLINK_FORCE));
  atomic_add (root_plen (*root), -1);
  return (node_key (*ret));
}

/* Look KEY up in the tuple TX, returning the stored key or DFL when it
   is absent.  MTSAFE selects the lock-free traversal.  */
result<object> tuple_get (interpreter *interp, object tx,
                          object key, object dfl, bool mtsafe)
{
  object space[4];
  tuple_args args (interp, space, KP_NELEM (space), false, false);
  tuple *tp = as_tuple (tx);

  object rv;
  if (mtsafe)
    rv = KP_TRY (tuple_get_mt (interp, tp, args, key));
  else
    rv = KP_TRY (tuple_get_lk (interp, tp, args, key));

  interp->retval = rv == UNBOUND ? dfl : rv;
  return (interp->retval);
}

/* Generic accessor entry point for tuples; NIL is the fallback when the
   caller supplied no explicit default.  */
result<object> get_o (interpreter *interp, object tx,
                      object key, object dfl)
{
  object fallback = dfl == UNBOUND ? NIL : dfl;
  return (tuple_get (interp, tx, key, fallback, !singlethr_p ()));
}

/* Find KEY in TX; on an exact match return the node.  Otherwise report
   through PRED/SUCC (when non-null) the closest real neighbors -- the
   lowest-level cached predecessor/successor that is neither empty nor
   the head sentinel -- and return fixint (0).  */
result<object> tuple_nearest (interpreter *interp, object tx, object key,
                              object *pred, object *succ)
{
  object space[4 + MAX_DEPTH * 2];
  tuple_args args (interp, space, KP_NELEM (space), true, true);
  tuple *tp = as_tuple (tx);

  object ret;
  if (singlethr_p ())
    ret = KP_TRY (find_preds_lk (interp, tp, args, 0, key, UNLINK_NONE));
  else
    ret = KP_TRY (find_preds_mt (interp, tp, args, 0, key, UNLINK_NONE));

  if (ret != fixint (0))
    kp_return (ret);

  if (pred)
    for (int lvl = 0; lvl < MAX_DEPTH; ++lvl)
      {
        ret = args.l_preds[lvl];
        if (ret != fixint (0) && node_key (ret) != UNBOUND)
          {
            *pred = ret;
            break;
          }
      }

  if (succ)
    for (int lvl = 0; lvl < MAX_DEPTH; ++lvl)
      {
        ret = args.l_succs[lvl];
        if (ret != fixint (0) && node_key (ret) != UNBOUND)
          {
            *succ = ret;
            break;
          }
      }

  kp_return (fixint (0));
}

/* Insert KEY into TX (no-op when already present).  Raises on a const
   tuple.  Returns true when a new element was added, issuing the GC
   write barrier for the new reference in that case.  */
result<bool> tuple_put (interpreter *interp, object tx,
                        object key, bool mtsafe)
{
  tuple *tp = as_tuple (tx);
  if (kp_unlikely (tp->flagged_p (FLAGS_CONST)))
    return (interp->raise_const ());

  object space[4 + MAX_DEPTH * 2];
  tuple_args args (interp, space, KP_NELEM (space), true, true);

  bool added;
  if (mtsafe)
    added = KP_TRY (tuple_put_mt (interp, tp, args, key));
  else
    added = KP_TRY (tuple_put_lk (interp, tp, args, key));

  if (added)
    deref (gc_wbarrier (interp, tx, key));

  return (added);
}

/* Remove KEY from TX, returning the removed key, or DFL when it was not
   present.  Raises on a const tuple.  */
result<object> tuple_pop (interpreter *interp, object tx,
                          object key, object dfl, bool mtsafe)
{
  tuple *tp = as_tuple (tx);
  if (kp_unlikely (tp->flagged_p (FLAGS_CONST)))
    return (interp->raise_const ());

  object space[4];
  tuple_args args (interp, space, KP_NELEM (space), false, false);

  object rv;
  if (mtsafe)
    rv = KP_TRY (tuple_pop_mt (interp, tp, args, key, dfl));
  else
    rv = KP_TRY (tuple_pop_lk (interp, tp, args, key, dfl));

  kp_return (rv);
}

/* Generic removal entry point; picks the lock-free path when more than
   one thread is running.  */
result<object> npop_o (interpreter *interp, object tx, object key, object dfl)
{
  bool mtsafe = !singlethr_p ();
  return (tuple_pop (interp, tx, key, dfl, mtsafe));
}

/* Generic store entry point.  Storing NIL under KEY means removal;
   otherwise insert KEY and return T if it was new, NIL if already there.  */
result<object> nput_o (interpreter *interp, object tx,
                       object key, object val)
{
  if (val == NIL)
    return (npop_o (interp, tx, key, val));

  bool inserted = KP_TRY (tuple_put (interp, tx, key, !singlethr_p ()));
  if (inserted)
    kp_return (symbol::t);

  kp_return (NIL);
}

/* Number of elements in the tuple TX (read from the root node's hidden
   counter slot).  */
uint32_t len_o (object tx)
{
  atomic_t *lenp = root_plen (as_tuple(tx)->head);
  return (*lenp);
}

/* Remove every element of TX at once by installing a fresh, empty root;
   threads still traversing the old root are unaffected.
   NOTE(review): unlike tuple_put, no GC write barrier is issued for the
   new head stored into TP -- confirm the fresh allocation makes one
   unnecessary.  */
result<bool> tuple_clr (interpreter *interp, object tx)
{
  tuple *tp = as_tuple (tx);
  tp->head = KP_TRY (make_root_node (interp));   // Install an empty root
  atomic_mfence_rel ();
  return (true);
}

/* Update visitor for nzap_o: invokes the user function with the node's
   current key as the first argument (followed by the caller-supplied
   extra arguments), remembers the previous key in 'ret', and stores the
   call's result back into the node's key slot.  */
struct tuple_nzapper
{
  valref ret;       // Previous value of the updated slot.
  int stack_idx;    // Stack slot that receives the current key per call.
  int nargs;        // Total argument count for call_n (key + extras).

  tuple_nzapper (interpreter *interp) : ret (interp)
    {
    }

  /* Push FN, a placeholder for the key, and the ARGC extra arguments
     onto the interpreter stack; they stay there across calls.  */
  result<int> init (interpreter *interp, object fn,
                    object *argv, int argc)
    {
      KP_VTRY (interp->growstk (argc + 2));
      *interp->stkend++ = fn;
      *interp->stkend++ = fixint (0);
      this->stack_idx = interp->stklen () - 1;

      for (int i = 0; i < argc; ++i)
        *interp->stkend++ = argv[i];

      this->nargs = argc + 1;
      return (0);
    }

  /* Apply FN to OUT (a node's key slot), replacing it in place.  */
  result<int> call (interpreter *interp, object& out)
    {
      *this->ret = interp->stack[this->stack_idx] = out;
      out = KP_TRY (call_n (interp, this->nargs));
      return (0);
    }
};

/* Replace the element stored under KEY in OBJ with FN (old-key, ARGV...),
   inserting a new element when KEY is absent (tuple_update_* allocates
   the node before applying FN).  FLAGS: NZAP_DFL is not supported,
   NZAP_NOMT forces the single-threaded path, NZAP_PREV makes the
   previous value the return value (otherwise the value left in
   interp->retval by the last call is returned).  */
result<object> nzap_o (interpreter *interp, object obj, object key,
                       uint32_t flags, object fn, object *argv, int argc)
{
  if (flags & NZAP_DFL)
    return (interp->raise ("arg-error", "default argument not supported"));

  tuple *tp = as_tuple (obj);
  if (kp_unlikely (tp->flagged_p (FLAGS_CONST)))
    return (interp->raise_const ());

  object space[4 + MAX_DEPTH * 2];
  tuple_args args (interp, space, KP_NELEM (space), true, true);
  tuple_nzapper nz (interp);

  KP_VTRY (nz.init (interp, fn, argv, argc));
  auto fx = (flags & NZAP_NOMT) ? tuple_update_lk<tuple_nzapper> :
                                  tuple_update_mt<tuple_nzapper>;
  bool wb = KP_TRY (fx (interp, tp, args, key, nz));

  // A fresh element holds a new reference; tell the GC.
  if (wb)
    deref (gc_wbarrier (interp, obj, key));

  if (flags & NZAP_PREV)
    interp->retval = *nz.ret;

  return (interp->retval);
}

/* Start iterating TX at the first node of the bottom-level chain.
   NOTE(review): the initial link is taken verbatim; a deletion mark
   (EXTRA_BIT) on it is not skipped here, unlike operator++ -- confirm
   iteration only begins in quiescent states.  */
tuple::iterator::iterator (interpreter *interp, object tx) : node (interp, tx)
{
  *this->node = node_next (as_tuple(tx)->head, 0);
}

/* Advance along the bottom level, skipping links that carry the deletion
   mark (EXTRA_BIT); stops at fixint (0), the end-of-chain sentinel.
   NOTE(review): after fetching a marked link, the next round calls
   node_next on the tagged value -- presumably xaref tolerates or masks
   EXTRA_BIT; confirm.  */
tuple::iterator& tuple::iterator::operator++ ()
{
  while (true)
    if (*this->node == fixint (0) ||
        ((*this->node = node_next (*this->node, 0)) & EXTRA_BIT) == 0)
      break;

  return (*this);
}

/* Canonical postfix increment: copy, advance, return the copy.  */
tuple::iterator tuple::iterator::operator++ (int)
{
  interpreter *interp = interpreter::self ();
  iterator prev { interp, *this };
  ++*this;
  return (prev);
}

/* Dereferencing an iterator yields the current node's key.  */
object tuple::iterator::operator* () const
{
  object cur = *this->node;
  return (node_key (cur));
}

/* Token-based iteration protocol.  With TOKEN == UNBOUND, return the
   first node of OBJ (NIL when empty).  Otherwise TOKEN must itself be a
   node array; return its key when ADV is false, or the next live node
   (NIL at the end) when ADV is true, skipping links that carry the
   deletion mark.  */
result<object> iter_o (interpreter *interp, object obj, object token, bool adv)
{
  if (token == UNBOUND)
    { // First call: hand out the head's successor as the token.
      interp->retval = node_next (as_tuple(obj)->head, 0);
      if (interp->retval == fixint (0))
        interp->retval = NIL;

      return (interp->retval);
    }
  else if (!array_p (token) || len_a (token) <= NODE_OVERHEAD)
    return (interp->raise ("arg-error", "invalid token"));
  else if (!adv)
    kp_return (node_key (token));

  valref tmp (interp, token);
  while (true)
    {
      *tmp = node_next (*tmp, 0);
      if ((*tmp & EXTRA_BIT) == 0)
        break;   // Unmarked link (or end-of-chain sentinel).
      else if (!array_p (*tmp) || len_a (*tmp) <= NODE_OVERHEAD)
        return (interp->raise ("arg-error", "invalid token"));
    }

  kp_return (*tmp == fixint (0) ? NIL : *tmp);
}

/* Print TX to STRM as "#(elem1 elem2 ...)"; returns the number of bytes
   written.  */
result<int64_t> write_o (interpreter *interp, stream *strm,
                         object tx, io_info& info)
{
  tuple::iterator it (interp, tx);
  int64_t nbytes = KP_TRY (strm->write (interp, "#(", 2));

  for (bool first = true; it.valid (); ++it, first = false)
    {
      if (!first)
        nbytes += KP_TRY (strm->putb (interp, ' '));

      nbytes += KP_TRY (xwrite (interp, strm, *it, info));
    }

  nbytes += KP_TRY (strm->putb (interp, ')'));
  return (nbytes);
}

/* Serialize OBJ to STRM: the comparator (a PACK_NIL byte when it is the
   default), the hi-water mark, every element in order, and a PACK_END
   terminator.  Returns the number of bytes written.  */
result<int64_t> pack_o (interpreter *interp, stream *strm,
                        object obj, pack_info& info)
{
  pack_info::eviction_guard eg { info, true };
  tuple *tp = as_tuple (obj);
  int64_t ret = KP_TRY (tp->test == fixint (0) ?
                        result<int64_t> (strm->putb (interp, PACK_NIL)) :
                        xpack (interp, strm, tp->test, info));

  ret += KP_TRY (strm->write (interp, &tp->hi_water));
  for (tuple::iterator it (interp, obj); it.valid (); ++it)
    { ret += KP_TRY (xpack (interp, strm, *it, info)); }

  ret += KP_TRY (strm->putb (interp, PACK_END));
  return (ret);
}

/* Inverse of pack_o: read the comparator, the hi-water mark and the
   elements from STRM and rebuild the tuple.  When SAVE is set, the tuple
   is recorded in INFO's mapping table (keyed by its start offset) before
   the elements are read, so references to it can resolve.  */
result<object> unpack_o (interpreter *interp, stream *strm,
                         pack_info& info, bool save)
{
  int tst = KP_TRY (strm->peekb (interp));
  valref tmp (interp, NIL), saved_pos (interp, *info.offset);

  if (tst == PACK_NIL)
    // Default comparator: consume the marker byte.
    deref (strm->getb (interp));
  else if (tst < 0)
    return (info.error ("invalid tuple comparator"));
  else
    { *tmp = KP_TRY (xunpack (interp, strm, info)); }

  atomic_t hw;
  {
    bool rv = KP_TRY (strm->sread (interp, &hw));
    if (!rv)
      return (info.error ("failed to read tuple hi-water"));
  }

  valref ret = KP_TRY (alloc_tuple (interp, *tmp));
  as_tuple(*ret)->hi_water = hw;

  if (save)
    KP_VTRY (info.add_mapping (interp, *saved_pos, *ret));

  // Elements until the PACK_END terminator.
  while (true)
    {
      tst = KP_TRY (strm->peekb (interp));
      if (tst == PACK_END)
        {
          deref (strm->getb (interp));
          break;
        }
      else
        {
          *tmp = KP_TRY (xunpack (interp, strm, info));
          KP_VTRY (tuple_put (interp, *ret, *tmp, false));
        }
    }

  kp_return (*ret);
}

// Arbitrary seed mixed into every tuple hash (see hash_o).
static const uint32_t TUPLE_HASH_SEED = 1701147252;

/* Hash OBJ by folding every element's hash into a seeded accumulator.  */
result<uint32_t> hash_o (interpreter *interp, object obj)
{
  uint32_t acc = TUPLE_HASH_SEED;
  tuple::iterator it (interp, obj);

  for (; it.valid (); ++it)
    {
      uint32_t h = KP_TRY (xhash (interp, *it));
      acc = mix_hash (acc, h);
    }

  return (acc);
}

/* Copy the tuple OBJ.  A shallow copy inserts the same elements; a deep
   copy clones each element before inserting it.  */
result<object> copy_o (interpreter *interp, object obj, bool deep)
{
  valref ret = KP_TRY (alloc_tuple (interp, as_tuple(obj)->test));

  if (!deep)
    {
      for (tuple::iterator it (interp, obj); it.valid (); ++it)
        KP_VTRY (tuple_put (interp, *ret, *it, false));
    }
  else
    {
      valref elem (interp);
      for (tuple::iterator it (interp, obj); it.valid (); ++it)
        {
          *elem = KP_TRY (copy (interp, *it, true));
          KP_VTRY (tuple_put (interp, *ret, *elem, false));
        }
    }

  kp_return (*ret);
}

/* Shared scaffolding for the binary set operations (union, intersection,
   difference, symmetric difference): iterators over both operands, a
   tuple_args over stack space, and the result tuple.  The result is also
   stored in the last slot of 'space' so the GC can reach it through the
   tuple_args rooting.  */
struct tuple_fct_data
{
  static const int SP_SIZE = 5 + MAX_DEPTH * 2;

  object space[SP_SIZE];   // Slots for tuple_args + the result tuple.
  tuple *tp1;
  tuple *tp2;
  tuple *ret;              // Result tuple, set in init().
  lazy<tuple_args> args;   // Constructed in place below.
  tuple::iterator i1;
  tuple::iterator i2;

  tuple_fct_data (interpreter *interp, object t1, object t2) :
      i1 (interp, t1), i2 (interp, t2)
    {
      this->tp1 = as_tuple (t1);
      this->tp2 = as_tuple (t2);
      // tuple_args has reference members, so it is built in place.
      new (&this->args) tuple_args (interp, this->space, SP_SIZE, true, true);
      this->space[SP_SIZE - 1] = fixint (0);
    }

  /* Verify the operands agree on ordering and allocate the result.  */
  result<int> init (interpreter *interp)
    {
      if (this->tp1->test != this->tp2->test)
        return (interp->raise ("arg-error",
                               "comparison functions must be equal"));

      object tup = KP_TRY (alloc_tuple (interp, this->tp1->test));
      this->ret = as_tuple (this->space[SP_SIZE - 1] = tup);
      return (0);
    }

  ~tuple_fct_data ()
    {
      // Mirror the placement new from the constructor.
      destroy (&this->args);
    }
};

/* Insert the iterator's current key into TP (single-threaded path), issue
   the GC write barrier for it, and advance the iterator.  */
static inline result<int>
put_and_adv (interpreter *interp, tuple *tp,
             tuple_args *args, tuple::iterator& it)
{
  KP_VTRY (tuple_put_lk (interp, tp, *args, *it));
  object key = *it;
  ++it;
  deref (gc_wbarrier (interp, tp->as_obj (), key));
  return (0);
}

/* Union of two tuples: a sorted merge that emits every key once,
   collapsing keys present in both operands.  */
result<object> tuple_union (interpreter *interp, object t1, object t2)
{
  tuple_fct_data df (interp, t1, t2);
  KP_VTRY (df.init (interp));

  while (df.i1.valid () && df.i2.valid ())
    {
      int d = KP_TRY (tuple_cmp (interp, df.ret, *df.i1, *df.i2));

      if (d > 0)
        KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i2));
      else
        {
          KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i1));
          if (d == 0)
            ++df.i2;   // Same key on both sides; emit it only once.
        }
    }

  // Drain whichever operand still has elements.
  while (df.i1.valid ())
    KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i1));
  while (df.i2.valid ())
    KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i2));

  kp_return (df.ret->as_obj ());
}

/* Intersection of two tuples: a sorted merge that inserts only the keys
   present in both operands.  */
result<object> tuple_intersect (interpreter *interp, object t1, object t2)
{
  tuple_fct_data df (interp, t1, t2);
  KP_VTRY (df.init (interp));

  while (df.i1.valid () && df.i2.valid ())
    {
      int d = KP_TRY (tuple_cmp (interp, df.ret, *df.i1, *df.i2));

      if (d < 0)
        ++df.i1;
      else if (d > 0)
        ++df.i2;
      else
        {
          // KP_VTRY (not KP_TRY): the value is discarded, matching the
          // other set operations in this file.
          KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i1));
          ++df.i2;
        }
    }

  kp_return (df.ret->as_obj ());
}

/* Difference of two tuples: the keys of T1 that are absent from T2.  */
result<object> tuple_diff (interpreter *interp, object t1, object t2)
{
  tuple_fct_data df (interp, t1, t2);
  KP_VTRY (df.init (interp));

  while (df.i1.valid () && df.i2.valid ())
    {
      int d = KP_TRY (tuple_cmp (interp, df.ret, *df.i1, *df.i2));

      if (d < 0)
        KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i1));
      else if (d > 0)
        ++df.i2;
      else
        {
          ++df.i1;
          ++df.i2;
        }
    }

  // Once T2 is exhausted, everything left in T1 is in the difference.
  while (df.i1.valid ())
    KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i1));

  kp_return (df.ret->as_obj ());
}

/* Symmetric difference: the keys that appear in exactly one of the two
   tuples.  */
result<object> tuple_symdiff (interpreter *interp, object t1, object t2)
{
  tuple_fct_data df (interp, t1, t2);
  KP_VTRY (df.init (interp));

  while (df.i1.valid () && df.i2.valid ())
    {
      int d = KP_TRY (tuple_cmp (interp, df.ret, *df.i1, *df.i2));

      if (d < 0)
        KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i1));
      else if (d > 0)
        KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i2));
      else
        {
          // Present in both operands: skip on both sides.
          ++df.i1;
          ++df.i2;
        }
    }

  // The remainder of either operand is unique by definition.
  while (df.i1.valid ())
    KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i1));
  while (df.i2.valid ())
    KP_VTRY (put_and_adv (interp, df.ret, df.args.ptr (), df.i2));

  kp_return (df.ret->as_obj ());
}

KP_DECLS_END

