﻿//#define asymmetrical_counts
//#define SKIP

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using System.Reactive.Concurrency;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using System.Linq;

using alib;
using alib.Collections;
using alib.Concurrency;
using alib.Debugging;
using alib.Enumerable;
using alib.Memory;

namespace agree
{
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/// <summary>
	/// A unifier inspired by Tomabechi (1991,1992) but with departures described in UW-CLMA MS thesis (Slayden 2012).
	/// </summary>
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	public unsafe sealed partial class Unification : IDisposable
	{
		const int SHL_PS = 5;				// log2(sizeof(Scratch)): shift converting a slot index to a byte offset
		const int GEN_COREF = 1;
		const int GEN_INCR = 8;				// generation counter stride per operation (freeU advances gen by 8)
		const BindingFlags bf = (BindingFlags)0x3E;	// DeclaredOnly | Instance | Static | Public | NonPublic

		///									ERG 10181 'hike' values:
		const int ParticipantsMax = 200;	// 81
		const int SlotsMax = 3000;			// 2359
		const int CorefsMax = 400;			// 255
		const int TfsNodeMax = 1600;		// 1469
		const int SkipMax = 512;			// capacity of the pps_skipped scratch array

		static Unification()
		{
			/// Pre-JIT the methods on the unification hot path so the first top-level unify
			/// call does not pay first-use compilation cost. Generic methods are skipped
			/// because PrepareMethod requires instantiation arguments for them.
			RuntimeHelpers.PrepareMethod(typeof(TypeUtils).GetMethod("UnifyTypesHiLo").MethodHandle);
			foreach (var mi in typeof(pinfo).GetMethods(bf))
				RuntimeHelpers.PrepareMethod(mi.MethodHandle);
			foreach (var mi in typeof(Slot).GetMethods(bf))
				RuntimeHelpers.PrepareMethod(mi.MethodHandle);
			foreach (var mi in typeof(Unification).GetMethods(bf))
				if (!mi.IsGenericMethod)
					RuntimeHelpers.PrepareMethod(mi.MethodHandle);

			/// shared overflow pool used when a thread's singleton unifier is busy (see allocU)
			u_spares = new Unification[12];

#if DEBUG && false
			/// (disabled) periodically repaint the count of checked-out unifiers at the
			/// top-left corner of the console window
			DefaultScheduler.Instance.SchedulePeriodic(cu_active, TimeSpan.FromMilliseconds(200), prv =>
			{
				int cur;
				if ((cur = cu_active) != prv)
					lock (Console.Out)
					{
						var cp = new { left = Console.CursorLeft, top = Console.CursorTop };
						Console.SetCursorPosition(Console.WindowLeft, Console.WindowTop);
						Console.BackgroundColor = ConsoleColor.Yellow;
						Console.ForegroundColor = ConsoleColor.Black;
						Console.WriteLine(cur.ToString().PadLeft(5));
						Console.ResetColor();
						Console.SetCursorPosition(cp.left, cp.top);
					}
				return cur;
			});
#endif
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// A singleton Unification object for each thread is kept as a ThreadStatic object. This saves on
		/// object cycling but makes re-entrancy of top-level unify calls expensive. This is only an issue 
		/// for expansion; once a well-formed TFS exists for each type in the grammar, a given unification 
		/// will produce well-formed TFSes without top-level recursion
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

		[ThreadStatic]
		static Unification _inst;			// per-thread singleton unifier (fast path in allocU)

		static Unification[] u_spares;		// overflow pool for re-entrant calls; detached by ReleaseSpares
		static int cu_active;				// count of currently checked-out unifiers (incremented/decremented around each operation)

		/// <summary>
		/// Checks out a unifier for the calling thread. The per-thread singleton is the fast
		/// path; if it is already busy (a re-entrant top-level call), a spare is claimed from
		/// — or a newly-created instance is published into — the shared u_spares pool.
		/// </summary>
		public static Unification allocU()
		{
			Unification u;
			if ((u = _inst) == null)
				_inst = u = new Unification { f_busy = 1 };
			else if (u.f_busy == 0)
				u.f_busy = 1;	// the singleton belongs to this thread alone; no interlock needed
			else
			{
				/// re-entrant call: try to claim an existing spare via CAS on its busy flag
				Unification[] _tmp;
				int j = 0;
				if ((_tmp = u_spares) != null)
					for (; j < _tmp.Length; j++)
					{
						if ((u = _tmp[j]) == null)
							break;
						if (u.f_busy == 0 && Interlocked.CompareExchange(ref u.f_busy, 1, 0) == 0)
							goto ok;
					}

				/// no free spare: create a new (already-busy) unifier and try to publish it
				/// into the first empty pool entry so it can be reused by later calls
				u = new Unification { f_busy = 1 };
				Thread.MemoryBarrier();		// make construction visible before publication
				if (_tmp != null)
				{
					while (j < _tmp.Length)
					{
						if (_tmp[j] == null && Interlocked.CompareExchange(ref _tmp[j], u, null) == null)
							break;
						j++;
					}
				}
			ok: ;
			}
			Interlocked.Increment(ref cu_active);
			return u;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Releases this unifier at the end of an operation: clears the used portion of the
		/// coref designation table, lets each participant release its GC/pin handles, advances
		/// the generation counter (lazily invalidating all scratch slots), and marks the
		/// instance free. The bool/int overloads pass a result through for tail-call use.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		bool freeU(bool ret) { freeU(); return ret; }
		int freeU(int ret) { freeU(); return ret; }
		ArrayTfs freeU()
		{
			/// null out only the first c_corefs entries — the rest were never written
			Slot** pcf = pps_corefs;
			while (0 < c_corefs--)
				*pcf++ = default(Slot*);

			/// participant cleanup (1-based table); releases the handles taken in _init_tfs
			pinfo* pi = pbx + 1;
			for (int i = 1; i <= c_participants; i++)
				(pi++)->cleanup();
#if DEBUG
			_rfi = null;
#endif
			/// advance the generation by GEN_INCR; if the 32-bit counter wraps to zero, all
			/// slot stamps must be physically re-zeroed
			if ((gen += 8) == 0)
				_reset_gen_sequence();

			this.f_busy = 0;
			Interlocked.Decrement(ref cu_active);
			return default(ArrayTfs);	// always null, so failure paths can 'return freeU();'
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// once expansion is complete, the spare unifiers can be discarded, retaining just one
		/// per thread. The pool is detached atomically, so concurrent allocU callers simply
		/// stop seeing spares; each pooled instance is then disposed.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		public static void ReleaseSpares()
		{
			var pool = Interlocked.Exchange(ref u_spares, null);
			if (pool == null)
				return;
			foreach (var spare in pool)
				if (spare != null)
					spare.Dispose();
		}

		/// <summary>
		/// Releases the unmanaged memory blocks and the GCHandles stored at their heads.
		/// The Interlocked guard makes teardown idempotent across explicit Dispose and the
		/// finalizer.
		/// </summary>
		/// <param name="disposing">unused; managed and unmanaged teardown are identical here</param>
		void Dispose(bool disposing)
		{
			if (Interlocked.Increment(ref disposed) != 1)
				return;

			Marshal.FreeHGlobal((IntPtr)pps_skipped);
			pps_skipped = null;
			Marshal.FreeHGlobal((IntPtr)pps_corefs);
			pps_corefs = null;

			/// the first cell of each large block holds a GCHandle back-reference; free it
			/// before releasing the block. NOTE(review): if the constructor threw partway
			/// through, these pointers could be null and _ubase->Free() would fault —
			/// confirm a finalizable instance can never escape a failed constructor here.
			_ubase->Free();
			Marshal.FreeHGlobal((IntPtr)_ubase);
			_ubase = null;
			ps_base = null;

			_upbx->Free();
			Marshal.FreeHGlobal((IntPtr)_upbx);
			_upbx = null;
			pbx = null;
		}

		int disposed;	// 0 until first Dispose/finalize; guards against double-free
		~Unification() { Dispose(false); }
		public void Dispose() { Dispose(true); GC.SuppressFinalize(this); }

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Allocates the fixed-size unmanaged working areas: the 1-based participant table
		/// (pbx), the scratch-slot arena (ps_base), and the coref/skip pointer arrays. The
		/// two large blocks each begin with a GCHandle referencing this instance.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Unification()
		{
			int cb_pbx = sizeof(GCHandle);
			cb_pbx += sizeof(pinfo) * (ParticipantsMax + 1);	// +1 is for the uix terminator, not the 1-based indexing
			_upbx = ((GCHandle*)Marshal.AllocHGlobal(cb_pbx));
#if DEBUG
			Kernel32.ZeroMemory(_upbx, cb_pbx);
#endif
			*_upbx = GCHandle.Alloc(this, GCHandleType.Normal);

			pbx = ((pinfo*)_upbx) + 1;			// reserved space for GCHandle
			pbx--;								// 1-based indexing
			for (int i = 1; i <= ParticipantsMax; i++)
				pbx[i].tfsix = i;
			pbx[1].uix0 = 1;					// base slot index for the first participant

			/// one leading Slot-sized cell holds the back-reference handle; this assumes
			/// sizeof(GCHandle) <= sizeof(Slot) (32 bytes, per SHL_PS)
			_ubase = (GCHandle*)Marshal.AllocHGlobal(sizeof(Slot) * (SlotsMax + 1));
			*_ubase = GCHandle.Alloc(this, GCHandleType.Normal);
			ps_base = ((Slot*)_ubase) + 1;	// reserved space for GCHandle
			_reset_gen_sequence();

			pps_corefs = (Slot**)Marshal.AllocHGlobal(sizeof(Slot*) * CorefsMax);
			Kernel32.ZeroMemory(pps_corefs, CorefsMax * sizeof(Slot*));

			pps_skipped = (Slot**)Marshal.AllocHGlobal(sizeof(Slot*) * SkipMax);
			Kernel32.ZeroMemory(pps_skipped, SkipMax * sizeof(Slot*));

			_debug_new();
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// re-used fields shared across multiple operations. In the case of the scratch slots, they are protected
		/// by the generation counter technique, using the following generation counter values:
		///		xxx00	gen				:	initialized by pass 1
		///		xxx01	nwf = gen+1		:	initialized by pass 1 (not well-formed)
		///		xxx10	ugen = gen+2	:	pass 2 target that has only been visited indirectly (by forwarding)
		///		xxx11	cgen = gen+3	:	counted and fully visited pass 2 node
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		const uint g_nwf = 1;	// gen + g_nwf stamps a node of the not-well-formed participant

		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		GCHandle* _upbx;		// unmanaged block: leading GCHandle + the pinfo participant table
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		GCHandle* _ubase;		// unmanaged block: leading GCHandle + the scratch slot arena
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		pinfo* pbx;				// 1-based participant table (pbx[1] .. pbx[c_participants])
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		public Slot* ps_base;	// base of the scratch arena; forwarding offsets are relative to it
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		Slot** pps_corefs;		// shared pool of per-operation coref designation entries (see _init_tfs)
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		Slot** pps_skipped;		// scratch array; only referenced by (currently disabled) SKIP paths

		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		int f_busy;				// 0 = free, 1 = checked out (claimed by CAS in allocU)
		[DebuggerDisplay("({gen,h})  {_summary_counts(0,null),nq}")]
		uint gen;				// current generation stamp; advanced by GEN_INCR per operation

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Re-zeroes every scratch slot's generation stamp and restores each slot's own index,
		/// then restarts the generation counter at its first usable value. Called once at
		/// construction and again whenever the 32-bit generation counter wraps to zero.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		void _reset_gen_sequence()
		{
			for (short ix = 0; ix < SlotsMax; ix++)
			{
				Slot* slot = ps_base + ix;
				slot->gen = 0;
				slot->uix = ix;
			}
			this.gen = 8;	// == GEN_INCR
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// fields initialized for each operation
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

		/// external reference and restriction
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		public TypeUtils tu { get { return r.FeatMgr.tu; } }
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		public FeatMgr ftm { get { return r.FeatMgr; } }
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		public Restrictor r;	// restrictor bound by init_reader for the current operation

		/// per-type feature table: indexed by type flags (see _joint_descent), each entry is
		/// a 0xFFFF-terminated ushort* feature list with its count stored just before it
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		ushort** rgpfix;

		/// control options
		Tfs.Flags opts;
		/// participant index (1-based) of the single not-well-formed input TFS, or 0 when all
		/// inputs are well-formed
		[DebuggerDisplay("{(tfsix_nwf==0?\"(all inputs well-formed)\":tfsix_nwf.ToString()),nq}")]
		int tfsix_nwf;

		/// allocation/management of scratch slots and TFS participants
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		int c_participants;		// entries in use in the 1-based pbx table
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		int c_corefs;			// coref designation entries handed out from pps_corefs
#if SKIP
		//[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		//Scratch** pps_skip;
		[DebuggerBrowsable(DebuggerBrowsableState.Never)]
		HashSet<IntPtr> hs_skip;
#endif

		public int c_init_nodes;	// scratch slots initialized during this operation
		/// for writing
#if P1_COUNT
		public int count_p1;
#endif
		public int count_p3;
		int next_mark;
		int next_coref_mark;

#if DEBUG && REVERSE_INDEX
		public static System.IO.TextWriter tw_failure;
#endif

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Prepares this (already checked-out) unifier for one operation: binds the restrictor
		/// and options and zeroes the per-operation counters. Throws if the restrictor was
		/// built for different Restrict/DeleteArgs options than those requested.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Unification init_reader(Restrictor r, Tfs.Flags opts)
		{
			if (r.opts != (opts & (Tfs.Flags.Restrict | Tfs.Flags.DeleteArgs)))
				throw new Exception();

			this.r = r;
			this.rgpfix = r.R;
			this.opts = opts;
			this.tfsix_nwf = 0;
#if P1_COUNT
			this.count_p1 = 0;
#endif
			this.c_init_nodes = 0;
#if DEBUG
			/// NOTE(review): these are reset only in DEBUG builds — presumably the write
			/// phase (re)initializes them in release builds; confirm
			this.count_p3 = 0;
			this.next_coref_mark = 0;
			this.next_mark = 0;
#endif

			this.c_participants = 0;
			this.c_corefs = 0;
#if SKIP
			//this.pps_skip = pps_skipped;
			this.hs_skip = null;
#endif

			/// sanity: participant table base and arena head are in their pristine state
			Debug.Assert(pbx[1].uix0 == 1 && ps_base->gen == 0);
			return this;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Initializes the scratch slot for a source node (via Slot._init_node) and records
		/// coreference designations: if the edge's coref (sign) bit is set and another slot
		/// already designates the same mark, the new slot is forwarded to the head of that
		/// slot's chain and the caller's 'ps' is redirected there. NOTE(review): the '#endif'
		/// inside this method closes the '#if false' alternative implementation above it.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#if false		
		Slot* _init_node(int tfsix, int ix1)
		{
			arr_tfs_entry ate;
			((pbx + tfsix)->tfs).GetIndexData(ix1, &ate);

			Slot* ps = ps_base + ((pbx + tfsix)->uix0 + ix1);
			_init_node(ref ps, tfsix, ate.e);
			return ps;
		}
#else
		[MethodImpl(MethodImplOptions.AggressiveInlining)]
		void _init_node(ref Slot* ps, int tfsix, Edge e)
		{
			/// featured nodes of the not-well-formed participant are stamped gen + g_nwf
			ps->_init_node(tfsix,
				e,
				tfsix == tfsix_nwf && (e.FlagsId & Edge.Flag.EtmNonBareType) != 0 ? gen + g_nwf : gen);

			if (e.FlagsId < 0)
			{
				/// coreferenced edge (sign bit set): register or chain the designation
				Slot** pps = ((pbx + tfsix)->coref_designations - e.Mark);
				if (*pps == null)
				{
					*pps = ps;
				}
				else
				{
					/// follow the existing forwarding chain (negative byte offsets from
					/// ps_base) to its head, and redirect the caller's pointer there
					Slot* nps;
					int fwd, dst;
					if ((fwd = (nps = *pps)->forward) < 0)
						while ((dst = (nps = (Slot*)((long)ps_base - fwd))->forward) < 0)
							fwd = dst;
					ps->forward = fwd;
					ps = nps;
				}
			}
#endif

#if P1_COUNT
			ps->c_cnt = 0;
#endif
			c_init_nodes++;
		}

#if false
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// (disabled) root-node variant of _init_node: expects a fresh coref designation for
		/// the mark (asserts it) instead of chaining forwards
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		void _init_node(Slot* ps, int tfsix, RootNodeData rnd)
		{
			ps->_init_node(tfsix,
				rnd.e,
				tfsix == tfsix_nwf && (rnd.e.FlagsId & Edge.Flag.EtmNonBareType) != 0 ? gen + g_nwf : gen);

			if (rnd.e.FlagsId < 0)
			{
				Slot** pps = ((pbx + tfsix)->coref_designations - rnd.e.Mark);
				Debug.Assert(*pps == null);
				*pps = ps;
			}

#if P1_COUNT
			ps->c_cnt = 0;
#endif
			c_init_nodes++;
		}
#endif

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Loads the scratch slot for the given 1-based slot of participant 'tfsix' without
		/// resolving forwarding on the returned pointer. Returns true if the slot had already
		/// been seen in the current generation (or is coreferenced), false if it was freshly
		/// initialized here and carries no coreference.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		bool _load_node_no_fwd(out Slot* ps, int tfsix, int src_slot_ix_1based)
		{
			pinfo* pi;
			ps = ps_base + (((pi = pbx + tfsix))->uix0 + src_slot_ix_1based);
			Slot** pps;
			if (ps->gen >= gen)
				return true;// ps->forward != 0;	(assuming single-use: if seen, then it must be coreferenced)

			ps->tfsix = (short)tfsix;
			ps->e = pi->entries[src_slot_ix_1based - 1].e;
			ps->gen = gen;
#if P1_COUNT
			ps->c_cnt = 0;
#endif
			c_init_nodes++;

			if (ps->f >= 0)
			{
				ps->forward = 0;
				return false;
			}

			/// coreferenced source node: strip the coreference (sign) bit from the flags.
			/// (fix: was 'ps->f = ~Edge.Flag.Coreference', which discarded the type bits
			/// entirely — the author guarded it with a sentinel throw; compare the
			/// '& 0xFFFFFFFF7FFFFFFF' masking of e_ul in _source_fetch)
			ps->f &= ~Edge.Flag.Coreference;
			if (*(pps = pi->coref_designations - ps->m_src) == null)
			{
				/// first designee for this coref mark
				*pps = ps;
				ps->forward = 0;
			}
			else
			{
				/// follow the existing forwarding chain to its head and forward this slot there
				Slot* nps;
				int fwd, dst;
				if ((fwd = (nps = *pps)->forward) < 0)
					while ((dst = (nps = (Slot*)((long)ps_base - fwd))->forward) < 0)
						fwd = dst;
				ps->forward = fwd;
				ps = nps;
			}
			return true;
		}


		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Fetches (initializing on first touch in this generation) the scratch slot reached
		/// from source node 'ps' of participant 'pi' along feature 'd'. Returns null when the
		/// source node has no arc for that feature; otherwise the forward-resolved slot.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Slot* _source_fetch(Slot* ps, pinfo* pi, int d)
		{
			Debug.Assert(ps->m_src != 0 && ps->forward == 0);

			Slot* nps;
			int fwd;
			Edge e;

			/// look up the target node index for feature 'd' in the source TFS; 0 = absent
			if ((fwd = pi->_fetch_ix(d, ps->m_src, &e)) == 0)
				return null;

#if DEBUG && UNIF_BREAKPOINTS
			if (uix_bp[pi->uix0 + fwd])
				Debugger.Break();
#endif

			/// slot address: ps_base + (uix0 + fwd) * sizeof(Slot), via the SHL_PS shift
			nps = (Slot*)((long)ps_base + ((pi->uix0 + fwd) << SHL_PS));

#if PRE_PLUG
			Debug.Assert(nps->gen != gen + 5 || nps->forward == 0);
			//nps->gen = gen;
#endif

			if (nps->gen < gen)
			{
				/// first touch in this operation: stamp and initialize the slot
				d = pi->tfsix;
				nps->tfsix = (short)d;
				if (ps->gen == gen + 3)
					nps->gen = gen + 3;		// parent is counted/fully-visited (cgen): propagate
				else
					nps->gen = d == tfsix_nwf && (e.FlagsId & Edge.Flag.EtmNonBareType) != 0 ?
									gen + g_nwf :
									gen;
#if DEBUG
				nps->f = 0;
#endif
				c_init_nodes++;
#if P1_COUNT
				nps->c_cnt = 0;
#endif
				if (e.FlagsId < 0)
				{
					/// coreferenced edge: if another slot already designates this mark,
					/// forward the new slot to (the head of) that slot's chain
					Slot** pps;
					if (*(pps = pi->coref_designations - e.Mark) != null)
					{
						ps = *pps;

#if PRE_PLUG
						Debug.Assert(ps->gen != gen + 5 || ps->forward == 0);
						//ps->gen = gen;
#endif

						if ((fwd = ps->forward) < 0)
							goto ps_fwd;
						nps->forward = (int)((long)ps_base - (long)ps);

						/// demote a counted (cgen) node back to plain 'gen' when its new
						/// chain partner is not also counted
						if (ps->gen == gen + 3 && nps->gen != gen + 3)
							ps->gen = gen;

						return ps;
					}
					*pps = nps;		// first designee for this coref mark
				}
				nps->forward = 0;
				nps->e_ul = *(ulong*)&e & 0xFFFFFFFF7FFFFFFF;	// store edge with coref (sign) bit cleared
			}
			else
			{

				if ((fwd = nps->forward) < 0)
					goto ps_fwd;
			}
			return nps;
		ps_fwd:
			/// chase the (negative byte-offset) forwarding chain to its head, then cache the
			/// final offset on nps (path compression)
			do
				ps = (Slot*)((long)ps_base - (d = fwd));
			while ((fwd = ps->forward) < 0);

			nps->forward = d;

			if (ps->gen == gen + 3 && nps->gen != gen + 3)
				ps->gen = gen;

			return ps;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Orders the two nodes for _duplex_hilo: 'hi' is the one with the smaller flags value
		/// (higher in the type hierarchy, per the comparison rules documented below); on a tie,
		/// the node from the not-well-formed participant takes the 'hi' position.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		[MethodImpl(MethodImplOptions.AggressiveInlining)]
		bool _duplex_node(Slot* ps0, Slot* ps1)
		{
			/// if unified types are indifferent, steer away from not-well-formed TFS
			return (ps0->f > ps1->f || (ps0->f == ps1->f && ps1->tfsix == tfsix_nwf)) ?
						_duplex_hilo(ps1, ps0) :
						_duplex_hilo(ps0, ps1);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// comparison of flags values:
		/// - coref bit is already stripped
		///	- leaf nodes (lower in the type hierarchy) have higher type IDs (in the lowest bits)
		///	- the (high) position of EtmNonBare bit implies that any type with features is below 
		///	  any subsuming type with no features
		///	- the (high) position of EtmLeafType implies that leaf nodes are below any subsuming
		///	  non-leaf type 
		///	- the (high) position of EtmString implies that string and skolem _values_ are below
		///	  *string*
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Unifies node 'hi' (higher in the type hierarchy: smaller flags value) into node
		/// 'lo'; on success 'hi' is forwarded to 'lo'. Returns false on type clash.
		bool _duplex_hilo(Slot* hi, Slot* lo)
		{
#if false
			if (hi == lo || lo->gen < gen || hi->gen < gen || hi->forward != 0 || lo->forward != 0)
				Nop.X();
			if (!(hi->f < lo->f || (hi->f == lo->f && (lo->tfsix != tfsix_nwf || hi->tfsix == tfsix_nwf))))
				Nop.X();
#endif
			Debug.Assert(hi != lo && lo->gen >= gen && hi->gen >= gen && hi->forward == 0 && lo->forward == 0);
			Debug.Assert(hi->f < lo->f || (hi->f == lo->f && (lo->tfsix != tfsix_nwf || hi->tfsix == tfsix_nwf)));

#if PRE_PLUG
			if (lo->gen == gen + 5)
				lo->gen = gen;
#endif

			Edge.Flag f, fh, fl;
			if ((fh = hi->f) == (fl = lo->f) || fh == 0)
			{
				/// identical types (or a vacuous 'hi'): no type computation needed; descend
				/// only when the surviving type carries features
				if ((fl & Edge.Flag.EtmNonBareType) == 0)
					goto finish;
				f = fl;
			}
			else
			{
				/// distinct types: fail fast when both carry the leaf-type bit (two distinct
				/// leaves can never unify), otherwise consult the type-unification table
				if ((fh & fl) >= Edge.Flag.EtmLeafType || (f = ftm.tu.UnifyTypesHiLo(fh, fl)) < 0)
				{
#if DEBUG && REVERSE_INDEX
					var _tmp = tw_failure;
					if (_tmp != null)
					{
						lock (_tmp)
						{
							_tmp.WriteLine("\r\nunif-fail: ---------------------");

							_tmp.WriteLine("\t{0}", tu.TypeNameOrStringValueDisplay(fh));
							_tmp.WriteLine(UnifierPaths(hi).Select(up => "\t\t" + up.ToString()).StringJoin(Environment.NewLine));

							_tmp.WriteLine("\t{0}", tu.TypeNameOrStringValueDisplay(fl));
							_tmp.WriteLine(UnifierPaths(lo).Select(up => "\t\t" + up.ToString()).StringJoin(Environment.NewLine));
						}
					}
#endif
					return false;
				}
				/// unified type has no features: just record it on 'lo' and forward
				if ((f & Edge.Flag.EtmNonBareType) == 0)
				{
#if P1_COUNT
					Debug.Assert(lo->c_cnt == 0 && hi->c_cnt == 0);
#endif
					lo->f = f;
					goto finish;
				}
			}
			Debug.Assert(lo->m_src != 0 || f != fl);

			/// a counted (gen+3) stamp survives only if both nodes carry it
			if (hi->gen == gen + 3 && lo->gen != gen + 3)
				hi->gen = gen;
			if (lo->gen == gen + 3 && hi->gen != gen + 3)
				lo->gen = gen;

			/// propagate well-formedness from argument TFSes if possible and/or necessary
			if (tfsix_nwf > 0)
			{
				/// in our uses it is never the case that both arguments are not-well-formed
				Debug.Assert(lo->gen != gen + g_nwf || hi->gen != gen + g_nwf);
				Debug.Assert(fh != fl || lo->gen != gen + g_nwf);

				if (lo->gen == gen + g_nwf)
					return _make_well_formed(f, hi, lo);
				hi->gen = gen;
			}

			/// the type was promoted by unification: expand it to restore well-formedness
			if (f != fl)
				return _make_well_formed(f, hi, lo);

			Debug.Assert(lo->m_src != 0);
			if (hi->m_src == 0)
			{
#if SKIP
				if (lo->gen == gen + 3)
					Nop.X();
				//if (lo->gen != gen + 3)
				if (hs_skip != null)
				{
					//*pps_skip++ = lo;
					//if (lo->gen == gen)
						//lo->gen = gen + 5;
					//hs_skip.Add((IntPtr)hi);
					hs_skip.Add((IntPtr)lo);
				}
#endif
			}
			else
			{
				/// both nodes have source structure: descend into the features of type 'f'
				if (!_joint_descent(*(ushort**)((long)rgpfix + ((int)f << 3)), hi, lo))
					return false;
			}

		finish:
			Debug.Assert(hi->forward == 0 && lo->forward == 0);

			/// forward 'hi' into 'lo'; forwarding values are negative byte deltas from ps_base
			hi->forward = (int)((long)ps_base - (long)lo);

			if (lo->gen == gen + 3 && hi->gen != gen + 3)
				lo->gen = gen;

#if PRE_PLUG
			if (hi->gen == gen + 5)
				hi->gen = gen;
#endif

			Debug.Assert(hi->gen != gen + g_nwf && lo->gen != gen + g_nwf);
			return true;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Restores well-formedness at a node whose type was promoted by unification:
		/// instantiates the expanded TFS for unified type 'f' and unifies both original nodes
		/// into its top node.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		bool _make_well_formed(Edge.Flag f, Slot* hi, Slot* lo)
		{
			Debug.Assert((f & Edge.Flag.EtmNonBareType) != 0);

			Slot* ps = _init_tfs_top_edge(_get_expand_tfs(f));

			/// the counted/fully-visited (gen+3) stamp survives only if both inputs carry it
			if (hi->gen == gen + 3 && lo->gen == gen + 3)
				ps->gen = gen + 3;

			if (!_duplex_hilo(lo, ps))
				return false;

			return _duplex_hilo(hi, ps);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Unifies the children of 'hi' and 'lo' for every feature of the unified type:
		/// 'pfix' is the 0xFFFF-terminated feature list (its count is stored immediately
		/// before it), and each feature's child is fetched from both sides and recursively
		/// unified via _duplex_node. Returns false as soon as any child pair fails.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		[MethodImpl(MethodImplOptions.AggressiveInlining)]
		bool _joint_descent(ushort* pfix, Slot* hi, Slot* lo)
		{
			Debug.Assert(hi != lo);
			Debug.Assert(hi->f < lo->f || (hi->f == lo->f && (lo->tfsix != tfsix_nwf || hi->tfsix == tfsix_nwf)));

			Slot* nh, nl;
			pinfo* pihi = pbx + hi->tfsix, pilo = pbx + lo->tfsix;
			int c = *(pfix - 1);	// feature count, stored just before the list
			do
			{
#if asymmetrical_counts
				/// NOTE(review): 'nl' is read below before it is assigned — this branch would
				/// not compile if 'asymmetrical_counts' were defined (the #define at the top
				/// of the file is commented out)
				if ((nh = _source_fetch(hi, pihi, *pfix)) == null)
				{
#if P1_COUNT
					throw new NotSupportedException();
#endif
					if ((nl->f & Edge.Flag.EtmNonBareType) != 0)
						*pps_skip++ = nl;
					continue;
				}
				if (nh == (nl = _source_fetch(lo, pilo, *pfix)))
					continue;
#else
				/// equal results: either both absent (don't count the feature) or the two
				/// sides already share the same (coref-forwarded) slot
				if ((nh = _source_fetch(hi, pihi, *pfix)) == (nl = _source_fetch(lo, pilo, *pfix)))
				{
					if (nl == null)
						c--;
					continue;
				}

				/// feature present only on the 'lo' side: nothing to unify
				if (nh == null)
				{
#if SKIP
					if ((nl->f & Edge.Flag.EtmNonBareType) != 0)
					{
						if (nl->gen == gen + 3 && hi->gen != gen + 3)
							nl->gen = gen;
						if (nl->gen == gen + 3)
							Nop.X();

						//if (nl->gen != gen + 3)
						if (hs_skip != null)
						{
							//*pps_skip++ = nl;
							//if (nl->gen == gen)
							    //nl->gen = gen + 5;
							hs_skip.Add((IntPtr)nl);
						}
					}
#endif
					continue;
				}
#endif
				/// feature present only on the 'hi' side: its subtree carries over unchanged
				if (nl == null)
				{
#if DEBUG
					/// only assert that 'lo' can't be used if it does not have deleted args, or if 
					/// the current feature is not an ARGS feature.
					//Tfs.Flags tfsf = pbx[lo->tfsix].tfs.flags;
					//if (((tfsf & Tfs.Flags.DeleteArgs) == 0 || !tm.restrictors.r_args[*pfix]) &&
					//    ((tfsf & Tfs.Flags.Restrict) == 0 || !tm.restrictors.r_pack[*pfix]) &&
					//    (opts & Tfs.Flags.CheckOnly) == 0)
					//    throw new Exception();
#endif
					c--;
					continue;
				}

				/// both sides present and distinct: recurse
				if (!_duplex_node(nl, nh))
				{
#if udiagnose
					if (*pfix == 20)
					{
						var _tmp = Console.Out;
						lock (_tmp)
						{
							tw_failure = _tmp;

							_tmp.WriteLine(new String('.', dots));
							dots = 0;

							_duplex_node(nl, nh);

							_tmp.WriteLine("=============================================");
							_tmp.WriteLine(new _path_walker_x3i(this, nl).StringJoin(Environment.NewLine));
							_tmp.WriteLine("- - - - - - - - - - - - - - - - - - - - - - -");
							_tmp.WriteLine(new _path_walker_x3i(this, nh).StringJoin(Environment.NewLine));

							tw_failure = null;
						}
						continue;
					}
#endif
					return false;
				}
#if udiagnose
				if (*pfix == 20)
					if (Interlocked.Increment(ref dots) == 100)
					{
						var _tmp = Console.Out;
						lock (_tmp)
						{
							tw_failure = _tmp;

							_tmp.WriteLine(new String('.', 100));
							dots = 0;

							tw_failure = null;
						}
					}
#endif
			}
			while (*++pfix != 0xFFFF);

			if (hi->gen != gen + 3)
				Debug.Assert(lo->gen != gen + 3);

#if P1_COUNT
			if (lo->gen != gen + 3)
			{
				count_p1 -= hi->c_cnt + lo->c_cnt - c;
				hi->c_cnt = 0;
				lo->c_cnt = (ushort)c;
			}
#endif

			return true;
		}

#if udiagnose
		static int dots;	// running count of '.' progress markers printed by the diagnostics above
#endif

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Expands any not-well-formed immediate features of the given root node by unifying
		/// each such child with the expanded TFS for its type. Returns false on failure.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		bool _make_root_features_well_formed(Slot* ps)
		{
			Debug.Assert(ps->forward == 0 && (ps->f & Edge.Flag.EtmNonBareType) != 0);
			Slot* nps;
			/// feature list for this node's type, from the per-type feature table
			ushort* pfix = *(ushort**)((long)rgpfix + ((int)ps->f << 3));
			int c = *(pfix - 1);	// feature count, stored just before the list
			do
			{
				if ((nps = _source_fetch(ps, pbx + ps->tfsix, *pfix)) == null)
				{
					if ((opts & Tfs.Flags.RootCoverage) == 0)
						c--;	// absent feature: not counted unless RootCoverage requires it
					continue;
				}
				/// child stamped not-well-formed and carrying features: expand its type
				if (nps->gen == gen + g_nwf && (nps->f & Edge.Flag.EtmNonBareType) != 0)
				{
					Slot* ps_exp = _init_tfs_top_edge(_get_expand_tfs(nps->f));
					if (!_duplex_hilo(nps, ps_exp))
						return false;
				}
			}
			while (*++pfix != 0xFFFF);	// 0xFFFF terminates the feature list
#if P1_COUNT
			if (ps->c_cnt == 0)
			{
				count_p1 += c;
				ps->c_cnt = (ushort)c;
			}
			else if (c != ps->c_cnt)
			{
				count_p1 += c - ps->c_cnt;
				ps->c_cnt = (ushort)c;
			}
#endif
			return true;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// Linear scan of the 1-based participant table for the entry whose TFS matches the
		/// argument; returns null when that TFS has not been registered as a participant.
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		pinfo* _find_tfs(Tfs tfs)
		{
			pinfo* pi = pbx + 1;
			for (int remaining = c_participants; remaining > 0; remaining--, pi++)
				if (pi->tfs == tfs)
					return pi;
			return null;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// Returns the scratch slot of the root node for the given participant TFS, or null
		/// when the TFS is not (yet) a participant in this operation.
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Slot* _find_root_slot(Tfs tfs)
		{
			pinfo* pi = _find_tfs(tfs);
			if (pi == null)
				return null;
			return ps_base + pi->uix0;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Returns the forward-resolved scratch slot for an arbitrary TFS slot, registering
		/// its TFS as a participant and initializing the node on first sight.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Slot* _find_init_participant_slot(ITfsSlot ts)
		{
			Tfs tfs;
			pinfo* pi;
			if ((pi = _find_tfs(tfs = ts.Tfs)) == null)
				pi = _init_tfs(tfs);

			Slot* ps = ps_base + (pi->uix0 + ts.SlotIndex);
			if (ps->gen < gen)
				_init_node(ref ps, pi->tfsix, tfs.GetIx1Edge(ts.SlotIndex));
			else
				/// already seen this generation: chase the forwarding chain to its head
				while (ps->forward < 0)
					ps = (Slot*)((long)ps_base - ps->forward);

			return ps;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Registers 'tfs' as a new participant and initializes the scratch slot for its top
		/// (root) edge, returning the (possibly coref-redirected) slot.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		[DebuggerStepThrough]
		Slot* _init_tfs_top_edge(Tfs tfs)
		{
			Debug.Assert(((ITfsSlot)tfs).SlotIndex == 0);

			pinfo* pi = _init_tfs(tfs);
			Slot* ps = ps_base + pi->uix0;
			_init_node(ref ps, pi->tfsix, tfs._top_edge);	// note: type from original TFS
			return ps;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Registers 'tfs' as a new participant and initializes the scratch slot for its node
		/// at (1-based) index 'ix1', returning the (possibly coref-redirected) slot.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Slot* _init_tfs_node(Tfs tfs, int ix1)
		{
			Debug.Assert(ix1 >= 0 && (tfs is _atfs_base || ix1 == 0));

			pinfo* pi = _init_tfs(tfs);
			Slot* ps = ps_base + (pi->uix0 + ix1);
			_init_node(ref ps, pi->tfsix, tfs.GetIx1Edge(ix1));	// note: type from original TFS
			return ps;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Adds a TFS to the participant table: reserves its range of scratch slots and coref
		/// designation entries, and takes GC handles (pinned, for the entry/hash arrays) so
		/// unmanaged code can address them. Handles are released by pinfo.cleanup in freeU.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		pinfo* _init_tfs(Tfs tfs)
		{
			if (c_participants >= ParticipantsMax)
				throw new Exception("need more unify participant entries");

			/// a non-array TFS participates via its type's root-coverage expansion
			_atfs_base atfs;
			if ((atfs = tfs as _atfs_base) == null)
				atfs = tfs.Type.fc.RootCoverageTfs;

			c_participants++;
			pinfo* pi = pbx + c_participants;

			/// designations for mark m are addressed as coref_designations - m (marks are
			/// presumably negative, i.e. ~index — confirm), hence the -1 bias into pps_corefs
			pi->coref_designations = pps_corefs + c_corefs - 1;
			if ((c_corefs += atfs.c_corefs) >= CorefsMax)
				throw new Exception("need more unify coref entries");

			/// record the next participant's slot base (doubles as this one's end marker)
			if (((pi + 1)->uix0 = pi->uix0 + 1 + (int)atfs.SlotCount) >= SlotsMax)
				throw new Exception("need more unify slots");

			pi->gch_tfs = GCHandle.Alloc(atfs, GCHandleType.Normal);
			pi->entries = (arr_tfs_entry*)
				(pi->gch_entries = GCHandle.Alloc(atfs.entries, GCHandleType.Pinned)).AddrOfPinnedObject();
			ArrayTfs _a = atfs as ArrayTfs;
			if (_a != null)
			{
				pi->gch_hash = GCHandle.Alloc(_a.h, GCHandleType.Pinned);
				pi->phf = (ushort*)pi->gch_hash.AddrOfPinnedObject();
			}
			else
			{
				pi->gch_hash = default(GCHandle);
				pi->phf = null;
			}

			_debug_tfs_init(pi);

			return pi;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// note asymmetry: completes the un-unified outer part of the result TFS with structure from the 'x' pair
		/// </summary>
		/// <param name="shell">slot whose TFS supplies the outer structure of the result</param>
		/// <param name="sections">slot pairs unified pairwise before the shell is written out</param>
		/// <returns>the written result, or null (via freeU) if any section pair fails to unify</returns>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		ArrayTfs unify_sections_into_shell(ITfsSlot shell, IEnumerable<Pairing<ITfsSlot>> sections)
		{
			/// 'foreach' (rather than the previous manual GetEnumerator loop) guarantees the
			/// enumerator is disposed even when a failed unification exits early
			foreach (Pairing<ITfsSlot> pr in sections)
				if (!_duplex_node(_find_init_participant_slot(pr.x), _find_init_participant_slot(pr.y)))
					return freeU();

			Slot* ps = _find_init_participant_slot(shell);

			return writeU(ps);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// Compatibility check only: unifies the two nodes in scratch space and discards the
		/// result, using restrictors derived from the combined flags of both TFSes. Frees the
		/// unifier before returning.
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		bool check_only(ITfsSlot ts0, ITfsSlot ts1)
		{
			rgpfix = ftm.ParseRestrictors.FromTfsFlags(ts0.Tfs.flags | ts1.Tfs.flags).R;

			Slot* ps1 = _init_tfs_node(ts0.Tfs, ts0.SlotIndex);
			Slot* ps2 = _init_tfs_node(ts1.Tfs, ts1.SlotIndex);

			return freeU(_duplex_node(ps1, ps2));
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// Compatibility check over a sequence of slot pairings, all of which must refer to
		/// the same two TFSes (participants 1 and 2). Frees the unifier before returning.
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		bool check_multiple(IEnumerable<Pairing<ITfsSlot>> rgpr)
		{
			/// 'using' disposes the enumerator on every exit path (previously it was leaked)
			using (var e = rgpr.GetEnumerator())
			{
				/// an empty sequence is vacuously compatible; previously the unchecked first
				/// MoveNext() left e.Current undefined and the unifier busy
				if (!e.MoveNext())
					return freeU(true);
				Pairing<ITfsSlot> pr = e.Current;

				Tfs tfs0 = pr.x.Tfs;
				Tfs tfs1 = pr.y.Tfs;

				Slot* ps1 = _init_tfs_top_edge(tfs0);
				Slot* ps2 = _init_tfs_top_edge(tfs1);
				Debug.Assert(ps1->tfsix == 1 && ps2->tfsix == 2);

				while (true)
				{
					Slot* psd1 = ps1 + pr.x.SlotIndex;
					_init_node(ref psd1, 1, pr.x.Edge());

					Slot* psd2 = ps2 + pr.y.SlotIndex;
					_init_node(ref psd2, 2, pr.y.Edge());

					if (!_duplex_node(psd1, psd2))
						return freeU(false);

					if (!e.MoveNext())
						break;
					pr = e.Current;
				}
			}
			return freeU(true);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// Unifies each daughter TFS root into the corresponding ARGS slot of the mother
		/// (rule) TFS, then writes out the result from the mother's top node. Returns null
		/// (via freeU) if any daughter fails to unify.
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Tfs unify_daughters(Tfs md_tfs, Tfs[] rgd)
		{
#if DEBUG
			/// todo todo: we need to pass multiple daughter slot indexes (in the mother) into the writing
			/// below, to help the outer scan detect the case where the TOPMOST dtr entry might not 
			/// be coreferenced. It only works now because we're always using this function 
			/// with daughter-arg deletion, which renders the daughters unreachable anyway.
			//if ((opts & Tfs.Flags.DeleteArgs) == 0)
			//	throw not.impl;
#endif

			/// the mother's ARGS list; one entry per daughter
			var rgm = md_tfs[md_tfs.im.RuleArgsPath].ListSlots;

			Debug.Assert(rgm.Length == rgd.Length);

			Slot* ps_m = _init_tfs_top_edge(md_tfs);
			Debug.Assert(ps_m->tfsix == 1);

			//pinfo* pi_outer = pbx + 1;

#if SKIP
			hs_skip = new HashSet<IntPtr>();
#endif
#if PRE_PLUG
			/// NOTE(review): this disabled block references 'pi_outer', which is commented
			/// out above — it would not compile if PRE_PLUG were defined
			if ((opts & Tfs.Flags.DeleteArgs) != 0)
			{
				byte[] dtr_entries = new byte[md_tfs.CorefCount];
				EdgeCounter ecr1 = new EdgeCounter(md_tfs, md_tfs.TopEdge, dtr_entries, rgpfix);

				for (int i = 0; i < dtr_entries.Length; i++)
				{
					if (dtr_entries[i] > 0)
					{
						int ix = md_tfs.GetOutMarkSlots(~i)[0];
						Debug.Assert(ix != 0);
						Scratch* px = ps_m + ix;

						px->tfsix = 1;
						px->forward = 0;
#if P1_COUNT
						px->c_cnt = 0;
#endif
						px->e_ul = pi_outer->entries[ix - 1].e_ul & 0xFFFFFFFF7FFFFFFF;
						px->gen = (px->f & Edge.Flag.EtmNonBareType) == 0 ? gen : gen + 5;
						*(pi_outer->coref_designations + i + 1) = px;
					}
				}
			}
#endif

			for (int i = 0; i < rgd.Length; i++)
			{
				Debug.Assert(rgm[i].IsValid);

				Slot* ps_mm = ps_m + rgm[i].ix1;
				if (ps_mm->gen < gen)
					_init_node(ref ps_mm, 1, rgm[i].Edge);

				Slot* ps_dd = _init_tfs_top_edge(rgd[i]);

				/// with arg deletion, stamp both nodes gen+3 ('counted and fully visited');
				/// presumably this lets the writer elide the deleted daughter structure — confirm
				if ((opts & Tfs.Flags.DeleteArgs) != 0)
				{
					ps_mm->gen = gen + 3;
					ps_dd->gen = gen + 3;
				}

				/// mother node passed as 'hi': relies on the rule's ARGS node subsuming the
				/// daughter root (checked by the Debug.Assert inside _duplex_hilo)
				if (!_duplex_hilo(ps_mm, ps_dd))
					return freeU();
			}

			return writeU(ps_m /*, -1  e.g.: md_tfs.DaughterNodes[0].SlotIndex*/);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// Unifies the node '_inner' into the node '_outer' (a slot within a larger "shell" TFS) and writes out
		/// the whole outer TFS, starting from its topmost edge, as an ArrayTfs. Returns freeU()'s failure value
		/// (presumably null — confirm) when the duplex pass fails.
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		ArrayTfs unify_section(TfsSlot _outer, ITfsSlot _inner)
		{
			const int tfsix_outer = 1;

			Debug.Assert(_outer.tfs._top_edge.Mark == Tfs.TopmostMark && (_outer.tfs._top_edge.FlagsId & Edge.Flag.EtmNonBareType) != 0);

			// load the outer TFS first so it becomes participant 'tfsix_outer' (1), then the inner
			Slot* pso, psi;
			pso = _init_tfs_node(_outer.tfs, _outer.ix1);
			psi = _init_tfs_node(_inner.Tfs, _inner.SlotIndex);

#if P1_COUNT
			//count_p1 = *(*(ushort**)((long)rgpfix + ((int)pso->f << 3)) - 1);
			//pso->c_cnt = (ushort)count_p1;
			pso->c_cnt = 0;
#endif
#if RESTRICT_DTR
			rgpfix = tm.restrictors.FromTfsFlags(opts | Tfs.Flags.DeleteArgs).R;
#endif
			// 'ps' addresses the outer participant's slot 0, i.e. its topmost edge's scratch slot
			pinfo* pi_outer = pbx + tfsix_outer;
			Slot* ps = ps_base + pi_outer->uix0;

#if SKIP
			hs_skip = new HashSet<IntPtr>();
#endif
#if PRE_PLUG

			if ((opts & Tfs.Flags.DeleteArgs) != 0)
			{
				byte[] dtr_entries = new byte[outer.CorefCount];
				EdgeCounter ecr1 = new EdgeCounter(outer, outer.TopEdge, dtr_entries, rgpfix);

				for (int i = 0; i < dtr_entries.Length; i++)
				{
					if (dtr_entries[i] > 0)
					{
						int ix = outer.GetOutMarkSlots(~i)[0];
						Debug.Assert(ix != 0);
						Scratch* px = ps + ix;

						px->tfsix = tfsix_outer;
						px->forward = 0;
#if P1_COUNT
						px->c_cnt = 0;
#endif
						px->e_ul = pi_outer->entries[ix - 1].e_ul & 0xFFFFFFFF7FFFFFFF;
						px->gen = (px->f & Edge.Flag.EtmNonBareType) == 0 ? gen : gen + 5;
						*(pi_outer->coref_designations + i + 1) = px;
					}
				}
			}
#endif
			if ((opts & Tfs.Flags.DeleteArgs) != 0)
			{
				// NOTE(review): gen + 3 appears to tag both section roots specially when daughter-arg
				// deletion is requested (matches unify_daughters) — confirm against the writer
				pso->gen = gen + 3;
				psi->gen = gen + 3;
			}

			if (!_duplex_node(psi, pso))
				return freeU();

#if RESTRICT_DTR
			rgpfix = tm.restrictors.FromTfsFlags(opts).R;
#endif
			// before writing, either lazily initialize the outer top edge's slot (never touched by the
			// duplex pass) or chase its forwarding chain to the current representative slot; negative
			// 'forward' values are byte offsets below ps_base (see the matching chase in expand())
			if (ps->gen < gen)
				_init_node(ref ps, tfsix_outer, _outer.tfs._top_edge);
			else
				while (ps->forward < 0)
					ps = (Slot*)((long)ps_base - ps->forward);

#if false
			/// if both inputs are restricted then we should know the count already based on the p1 count 
			/// and their slot counts?
			if ((opts & Tfs.Flags.Restrict) != 0 &&
				(outer.flags & Tfs.Flags.Restrict) != 0 &&
				(inner.flags & Tfs.Flags.Restrict) != 0)
			{
			}
#endif
			return writeU(ps /*, _outer.ix1*/);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// Unifies each TFS in 'args' into the definition TFS of instance 't', then makes the root features
		/// well-formed and writes out the result. Returns freeU()'s failure value (presumably null — confirm)
		/// if any argument fails to unify or well-formedness cannot be established.
		///
		/// for all of the expanding functions which follow, note that we don't set the index of a non-well-formed TFS 
		/// until *after* initializing its scratch slots; the single scratch slot for the topmost edge of the expanding 
		/// TFS is actually marked well-formed, and this prevents unbounded recursion during expansion
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Tfs unify_force_expand(Instance t, Tfs[] args)
		{
			// the definition is loaded first and recorded as the not-(yet-)well-formed participant
			Slot* ps = _init_tfs_top_edge(t.Definition);
			this.tfsix_nwf = ps->tfsix;
#if P1_COUNT
			//count_p1 = *(*(ushort**)((long)rgpfix + ((int)ps->f << 3)) - 1);
			//ps->c_cnt = (ushort)count_p1;
#endif
			for (int i = 0; i < args.Length; i++)
			{
				if (!_duplex_hilo(_init_tfs_top_edge(args[i]), ps))
					return freeU();

				// a unification pass may forward the root slot; chase the (negative byte-offset)
				// forwarding chain so the next pass starts at the current representative
				while (ps->forward < 0)
					ps = (Slot*)((long)ps_base - ps->forward);
			}
			return _make_root_features_well_formed(ps) ? writeU(ps) : freeU();
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Unifies 'tfs' into the definition TFS of entry 'ent', makes the root features well-formed, and writes
		/// out the result; returns freeU()'s failure value (presumably null — confirm) on any failure.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Tfs expand(Instance ent, Tfs tfs)
		{
			// the definition is participant 1 and recorded as the not-(yet-)well-formed participant;
			// see the recursion note on unify_force_expand
			Slot* ps = _init_tfs_top_edge(ent.Definition);
			this.tfsix_nwf = ps->tfsix;
#if P1_COUNT
			//count_p1 = *(*(ushort**)((long)rgpfix + ((int)ps->f << 3)) - 1);
			//ps->c_cnt = (ushort)count_p1;
#endif
			if (!_duplex_hilo(ps, _init_tfs_top_edge(tfs)))
				return freeU();

			// chase the root's forwarding chain (negative byte offsets below ps_base) before writing
			while (ps->forward < 0)
				ps = (Slot*)((long)ps_base - ps->forward);

			return _make_root_features_well_formed(ps) ? writeU(ps) : freeU();
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Makes the root features of 'tfs' well-formed and writes out the result; no second participant is
		/// unified in. Returns freeU()'s failure value (presumably null — confirm) on failure.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Tfs expand(Tfs tfs)
		{
			// record the loaded TFS as the not-(yet-)well-formed participant; see the recursion note
			// on unify_force_expand
			Slot* ps = _init_tfs_top_edge(tfs);
			this.tfsix_nwf = ps->tfsix;

#if P1_COUNT
			//count_p1 = *(*(ushort**)((long)rgpfix + ((int)ps->f << 3)) - 1);
			//ps->c_cnt = (ushort)count_p1;
#endif
			return _make_root_features_well_formed(ps) ? writeU(ps) : freeU();
		}


		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// The expand calls need to prepare for possible re-entrancy. When done, destroy unifier from 
		/// re-entrancy (if any) and restore original thread-static unifier
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Tfs _get_expand_tfs(Edge.Flag f)
		{
			Type t;
			Tfs tfs;
			if ((tfs = (t = ftm.bcpo.type_arr[(int)(f & Edge.Flag.MultiIdMask)])._expanded) is BusyTfs)
				tfs = tfs.InterlockedWaitPulse(ref t._expanded);
			else if (tfs == null)
			{
				Debug.Assert(tfsix_nwf > 0);
				tfs = t.Expanded;
			}
			return tfs;
		}

		/// <summary> Locates the unifier slot for the feature/mark pair 'fm' within participant 'pi'
		/// (convenience overload; see _get_slot_index(pinfo*, int, int)). </summary>
		[MethodImpl(MethodImplOptions.AggressiveInlining)]
		public Slot* _get_slot_index(pinfo* pi, FeatMark fm)
		{
			return _get_slot_index(pi, fm.i_feat, fm.m);
		}
		/// <summary> Locates the unifier slot for (i_feat, mark) within participant 'pi': the participant's
		/// base slot offset plus the edge index reported by its TFS. </summary>
		[MethodImpl(MethodImplOptions.AggressiveInlining)]
		public Slot* _get_slot_index(pinfo* pi, int i_feat, int mark)
		{
			return ps_base + (pi->uix0 + pi->tfs.GetEdgeIndex(i_feat, mark));
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Retired logic kept for reference only: the body is compiled out unless SINGLE_FEAT_RESTRICT is
		/// defined (it is not defined in this file), so this method is a no-op. Candidate for deletion.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		void OLD_CODE()
		{
#if SINGLE_FEAT_RESTRICT
			//slightly hacky -- assumes PredRestrict and FormRestrict won't be used together (which they probably won't...)
			if ((opts & Tfs.Flags.PredRestrict) != 0)
				this.ifeat_restrict = uc.mrsm.ifeat_pred;
			else
				this.ifeat_restrict = -1;
#endif
		}

		/// <summary> Public entry point: non-writing unifiability check between two TFS nodes. Rents a pooled
		/// unifier, configures it as a reader under restrictor 'r' (which must carry no option flags), and runs
		/// check_only. </summary>
		[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
		public static bool Check(Restrictor r, ITfsSlot ts0, ITfsSlot ts1)
		{
			Debug.Assert(r.opts == 0);
			return allocU().init_reader(r, 0).check_only(ts0, ts1);
		}

		/// <summary> Public entry point: non-writing unifiability check over a sequence of node pairings
		/// (see check_multiple); the restrictor's own option flags are used. </summary>
		[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
		public static bool CheckMultipleNodePointers(Restrictor r, IEnumerable<Pairing<ITfsSlot>> rgpr)
		{
			return allocU().init_reader(r, r.opts).check_multiple(rgpr);
		}

		/// <summary> Public entry point: unifies each pairing in 'sections' into the shell TFS and writes the
		/// result (see unify_sections_into_shell, defined elsewhere in this partial class). When no restrictor
		/// is supplied, the shell's no-op restrictor is used. </summary>
		[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
		public static ArrayTfs UnifySectionsIntoShell(Tfs shell, IEnumerable<Pairing<ITfsSlot>> sections, Tfs.Flags opts = Tfs.Flags.None, Restrictor r = null)
		{
			return allocU().init_reader(r ?? shell.ftm.r_none, opts).unify_sections_into_shell(shell, sections);
		}

		/// <summary> Public entry point: unifies every daughter TFS into the mother/rule TFS 'md_tfs' using the
		/// grammar's rule-arguments restrictor (see unify_daughters). </summary>
		[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
		public static Tfs UnifyAllDaughters(Tfs md_tfs, Tfs[] dtrs)
		{
			var r = md_tfs.ftm.ParseRestrictors.r_args;

			return allocU().init_reader(r, r.opts).unify_daughters(md_tfs, dtrs);
		}

		/// <summary> Public entry point: unifies 'ts_inner' into the 'ts_outer' node and writes out the whole
		/// outer TFS (see unify_section). </summary>
		[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
		public static ArrayTfs UnifySection(Restrictor r, Tfs.Flags opts, TfsSlot ts_outer, ITfsSlot ts_inner)
		{
			return allocU().init_reader(r, opts).unify_section(ts_outer, ts_inner);
		}

		/// <summary> Public entry point: unifies each 'args' TFS into the definition of instance 't' and
		/// makes the result well-formed (see unify_force_expand). 'r' must carry no option flags; the
		/// RootCoverage flag is forced. </summary>
		[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
		public static Tfs UnifyForceExpand(Restrictor r, Instance t, Tfs[] args)
		{
			Debug.Assert(r.opts == Tfs.Flags.None);
			return allocU().init_reader(r, Tfs.Flags.RootCoverage).unify_force_expand(t, args);
		}

		/// <summary> Public entry point: makes the root features of 'tfs' well-formed and writes the result
		/// (see expand(Tfs)). </summary>
		public static Tfs Expand(Restrictor r, Tfs tfs, Tfs.Flags opts)
		{
			return allocU().init_reader(r, opts).expand(tfs);
		}

		/// <summary> Public entry point: unifies 'tfs' into the definition of entry 'ent' and makes the result
		/// well-formed (see expand(Instance, Tfs)). 'r' must carry no option flags. </summary>
		public static Tfs Expand(Restrictor r, Instance ent, Tfs tfs)
		{
			Debug.Assert(r.opts == 0);
			return allocU().init_reader(r, 0).expand(ent, tfs);
		}

		//public static void MrsToBag(Restrictor r, agree.mrs.Mrs mrs, TfsBag bag)
		//{
		//	Debug.Assert(r.opts == 0);
		//	allocU().init_reader(r, 0).mrs_to_bag(mrs, bag);
		//}


		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// participant info. representing a trivial mapping of a TFS into a range of unifier slots. The same TFS can
		/// be loaded multiple times without worrying about its coreferences self-conflating. Because this struct is
		/// manipulated by pointer, managed handles are stored as GCHandles and corresponding care must be applied.
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		public partial struct pinfo
		{
			/// <summary> managed handle to the TFS itself. (not-pinned) </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public GCHandle gch_tfs;

			/// <summary> managed handle to the TFS's storage array. **pinned** </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public GCHandle gch_entries;

			/// <summary> managed handle to the TFS's hash table. **pinned** (optional; may be unallocated) </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public GCHandle gch_hash;

			/// <summary> native pointer to the pinned storage array </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public arr_tfs_entry* entries;

			/// <summary> native pointer to the pinned hash table; null when the TFS supplies no hash
			/// (lookups then fall back to the writeable TFS itself — see _fetch_ix). </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public ushort* phf;

			/// <summary> pointer to a range of coreference fixups which the unifier populates dynamically 
			/// in order to restore and track referential equivalence of coreferenced nodes which is not 
			/// represented in the array storage model </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public Slot** coref_designations;

			/// <summary> 0-based index of the first unifier slot assigned to this participant. </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public int uix0;

			/// <summary>
			/// 1-based index of the participant this slot refers to. Similar to the unifier slots,
			/// this field allows the unifier handle to be recovered from position u.pbx[0]. Other than
			/// this use though, because the value of this field--which is fixed for the lifetime of 
			/// the unifier--is trivially given by (pi-pbx), it is not as important as the caching 
			/// of 'tfsix' in the _unifier_ slot since the latter is operationally variant and much 
			/// more expensive (given only the slot index or pointer) to recover (viz. the debug
			/// function 'get_pi_for_old').
			/// </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public int tfsix;

			/// <summary>
			/// Finds the 1-based storage index of the edge for (i_feat, mark) and, when found, copies the
			/// edge into '*pe'. Returns 0 when not found. Uses the pinned hash table when present,
			/// otherwise defers to the writeable TFS.
			/// </summary>
			[MethodImpl(MethodImplOptions.AggressiveInlining)]
			public int _fetch_ix(int i_feat, int mark, Edge* pe)
			{
				Debug.Assert(mark != 0);
				if (phf == null)
					return wtfs.GetEdgeIndex(i_feat, mark, pe);
				long ent_base = (long)entries;
				arr_tfs_entry* pa;
				int ix;
				// hash bucket head: low byte of (feature XOR mark); 0 means an empty chain
				if ((ix = phf[(byte)(i_feat ^ mark)]) != 0)
				{
					// pack the 16-bit mark into the upper half of the 32-bit key so a single int
					// compare matches the entry's leading (feature, mark) pair — assumes
					// little-endian layout and 16-byte arr_tfs_entry stride (the '<< 4' below)
					*((short*)&i_feat + 1) = (short)mark;
					do
						if (*(int*)(pa = (arr_tfs_entry*)(ent_base + ((ix - 1) << 4))) == i_feat)
						{
							*pe = pa->e;
							break;
						}
					while ((ix = pa->next) != 0);
				}
				return ix;
			}

			/// <summary>
			/// Same chained-hash lookup as _fetch_ix, but returns only the 1-based storage index
			/// (0 when not found) without copying out the edge.
			/// </summary>
			[MethodImpl(MethodImplOptions.AggressiveInlining)]
			public int _fetch_ix_only(int i_feat, int mark)
			{
				Debug.Assert(mark != 0);
				if (phf == null)
					return wtfs.GetEdgeIndex(i_feat, mark);
				long ent_base = (long)entries;
				arr_tfs_entry* pa;
				int ix;
				if ((ix = phf[(byte)(i_feat ^ mark)]) != 0)
				{
					// see _fetch_ix for the key-packing and stride assumptions
					*((short*)&i_feat + 1) = (short)mark;
					do
						if (*(int*)(pa = (arr_tfs_entry*)(ent_base + ((ix - 1) << 4))) == i_feat)
							return ix;
					while ((ix = pa->next) != 0);
				}
				return 0;
			}

			/// <summary> Debug use. Generally, you should have this already </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public Unification _u
			{
				get { fixed (pinfo* _pi = &this) return (Unification)((GCHandle*)(_pi - tfsix))->Target; }
			}

			/// <summary> the participant's TFS as a WriteableTfs; default when the handle is unallocated </summary>
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public WriteableTfs wtfs
			{
				get { return gch_tfs.IsAllocated ? (WriteableTfs)gch_tfs.Target : default(WriteableTfs); }
			}

			//[DebuggerDisplay("{(gch_tfs.IsAllocated?tfs.ToString():\"(not initialized)\"),nq}")]
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public Tfs tfs
			{
				get { return gch_tfs.IsAllocated ? (Tfs)gch_tfs.Target : default(Tfs); }
			}

			/// <summary>
			/// Releases all GC handles held by this participant. Safe to call on a partially (or never)
			/// initialized pinfo: each handle is guarded, since GCHandle.Free throws
			/// InvalidOperationException on an unallocated handle. (Previously only gch_hash was
			/// guarded.)
			/// </summary>
			public void cleanup()
			{
				if (gch_tfs.IsAllocated)
					gch_tfs.Free();
				if (gch_entries.IsAllocated)
					gch_entries.Free();
				if (gch_hash.IsAllocated)
					gch_hash.Free();
#if DEBUG
				// scrub the fields in debug builds so a use-after-cleanup fails fast
				gch_tfs = default(GCHandle);
				gch_entries = default(GCHandle);
				gch_hash = default(GCHandle);
				entries = null;
				coref_designations = null;
#endif
			}
		};

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// A single 32-byte unifier scratch slot (explicit layout; size is tied to SHL_PS = log2(32) for the
		/// pointer arithmetic in '_u'). Slots carry a generation counter for lazy reinitialization and a
		/// forwarding value used to union coreferenced nodes during unification.
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		[StructLayout(LayoutKind.Explicit, Size = 32)]
		public partial struct Slot
		{
			///////////////////////////////////////////////////////////////////////
			/// canonical names
			/// 
			/// 'forward': when negative, this slot has been forwarded; callers chase the chain via
			/// (Slot*)((long)ps_base - forward), i.e. the value is a byte offset relative to ps_base
			/// (see e.g. unify_section / expand). In DEBUG builds writes also invalidate the owning
			/// unifier's '_rfi' cache.
#if DEBUG
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(0)]
			int _forward;
			[DebuggerBrowsable(DebuggerBrowsableState.Never)]
			public int forward
			{
				[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
				get { return _forward; }
				[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
				set
				{
					if (value == _forward)
						return;
					_forward = value;
					_u._rfi = null;
				}
			}
#else
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(0)]
			public int forward;
#endif
			//[DebuggerBrowsable(DebuggerBrowsableState.Never), DebuggerDisplay("{this.Gen}  ({gen,h})")]
			/// generation counter: a slot whose 'gen' is below the unifier's current generation is stale
			/// and must be (re-)initialized before use
			[FieldOffset(4)]
			public uint gen;

			/// 1-based index of the participant (pinfo) this slot currently belongs to
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(8)]
			public short tfsix;
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(10)]
			public ushort i_feat;
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(12)]
			public short d_over;

#if P1_COUNT
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(14)]
			public ushort c_cnt;
#endif

			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(24)]
			public int ix_upper;

			/// this slot's own index in the unifier's slot array; fixed at initialization and used by
			/// '_u' below to recover the owning unifier
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(28)]
			public short uix;
			/// 
			///////////////////////////////////////////////////////////////////////


			///////////////////////////////////////////////////////////////////////
			/// note: union (overlaying aliases) — 'f'/'e'/'e_ul' all start at offset 16;
			/// 'e_ul' views the whole 8-byte Edge, 'f' its low 4 bytes, 'm_src' the high 4
			/// 
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(16)]
			public Edge.Flag f;
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(20)]
			public int m_src;
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(16)]
			public Edge e;
			[DebuggerBrowsable(DebuggerBrowsableState.Never), FieldOffset(16)]
			public ulong e_ul;
			/// 
			///////////////////////////////////////////////////////////////////////

			/// <summary> please do not use this; use u.init_node instead. Resets the slot to an
			/// unforwarded copy of edge 'e' for participant 'tfsix' at generation 'gen'. The mask
			/// clears bit 31 of the edge's low dword — presumably a transient flag bit that must not
			/// survive the copy; confirm against Edge.Flag. </summary>
			[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
			internal void _init_node(int tfsix, Edge e, uint gen)
			{
				Debug.Assert(tfsix > 0);
#if UNIFIER_DEBUGGING
				Debug.Assert(this.gen < gen);
#endif
				this.forward = 0;
				this.tfsix = (short)tfsix;
				this.e_ul = *(ulong*)&e & 0xFFFFFFFF7FFFFFFF;
				this.gen = gen;
			}

			/// <summary>
			/// Although generally not a best practice, it is possible to find out which unifier a given
			/// unifer slot belongs to, given only the slot pointer. To enable this, a GCHandle to the
			/// unifier is stored at location ps_base[-1] in its respective set of slots during unifier
			/// initialization. Now that the (presence of the) slot index (in every slot) has been 
			/// promoted to non-debug builds (since, once initialized, they never need to be updated 
			/// during operation), you can subtract off the slot index to find and recover the managed 
			/// unifier handle. Because of the hoops involved, the technique is mostly intended for 
			/// debug and development use. 
			/// </summary>
			public Unification _u
			{
				[MethodImpl(MethodImplOptions.AggressiveInlining), DebuggerStepThrough]
				get
				{
					// (uix + 1) << SHL_PS converts a slot count to a byte offset (32-byte slots),
					// landing on the GCHandle stored just below ps_base
					fixed (Slot* ps = &this)
						return (Unification)((GCHandle*)((long)ps - ((uix + 1) << SHL_PS)))->Target;
				}
			}
			/// <summary> the feature manager of the owning unifier (debug convenience; see _u) </summary>
			public FeatMgr _ftm { get { return _u.ftm; } }

			/// <summary>
			/// pointer to the TFS's 'participant info' for this unification operation. To be unified
			/// a TFS must be loaded into the unifier, at which time it is given a range of unifier slots
			/// and becomes known as a 'participant'. This function has to recover the slot's unifier on
			/// its own and thus is mostly for debug use. For a better option, provide the correct 
			/// unifier and compute 'u.pbx + ps->tfsix'
			/// </summary>
			public pinfo* _pi
			{
				[MethodImpl(MethodImplOptions.AggressiveInlining)]
				get
				{
					var u = this._u;
#if DEBUG
					// a stale slot's 'tfsix' may refer to a previous operation; do the slow search instead
					if (gen < u.gen)
						return u._find_pi_for_old(uix);
#endif
					return u.pbx + tfsix;
				}
			}
			//public pinfo* pi(Unification u) { return u.pbx + this.tfsix; }

			/// <summary>
			/// 1-based storage index (subtract 1 to use as an index in 'entries') in the source TFS
			/// </summary>
			//public int _ix1 { get { return uix - _pi->uix0; } }
			//public int ix1(pinfo* pi) { return this.uix - pi->uix0; }
		};
	};
}
