﻿using System;
using System.Collections;
using System.Collections.Generic;
using System.ComponentModel;
using System.Diagnostics;
using System.Linq;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Windows.Markup;

using agree.configuration;

using alib;
using alib.Array;
using alib.BitArray;
using alib.Collections;
using alib.Debugging;
using alib.Dictionary;
using alib.Enumerable;
using alib.Hashing;

namespace agree
{
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/// <summary>
	/// Preliminary storage for the authored types whilst they are being loaded. After the grammar load is complete, this
	/// object would typically be released.
	/// </summary>
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	[ContentProperty("Grammar")]
	[UsableDuringInitialization(true)]
	public sealed class TypeDictionary : has_grammar_base, IIdentDict<String, Type>, ISupportInitializeNotification
	{
		/// <summary>
		/// Pre-JIT the methods of the 'arr' helper class — closing single-argument generic
		/// methods over agree.Type — so grammar loading does not pay first-call JIT costs.
		/// </summary>
		static TypeDictionary()
		{
			// 0x3A == Static | Public | NonPublic | DeclaredOnly
			foreach (MethodInfo mi in typeof(arr).GetMethods((BindingFlags)0x3A))
			{
				MethodInfo prep = mi;
				if (prep.IsGenericMethod)
				{
					// only one-type-argument generics not mentioning "Value" can be
					// closed over agree.Type; skip preparing the rest entirely
					if (prep.GetGenericArguments().Length != 1 || prep.Name.Contains("Value"))
						continue;
					prep = prep.MakeGenericMethod(typeof(agree.Type));
				}
				RuntimeHelpers.PrepareMethod(prep.MethodHandle);
			}
		}

		public TypeDictionary(IRtParent parent)
			: base(parent)
		{
			NullTfs = new NullTfs(this);
			dict = new SlotDictionary<String, Type>(StringComparer.OrdinalIgnoreCase);
			strings = new IndexedHashSafe<String>();
			strings.Add(String.Empty);
			// the empty string must land in the slot reserved for EdgeFlag.EmptyString
			Debug.Assert(strings[String.Empty] == (int)(EdgeFlag.EmptyString & Edge.Flag.MultiIdMask));
		}

		public readonly NullTfs NullTfs;

		// name of the configured string type; captured in EndInit so AddType can bind
		// ΔString at most once
		String s_string_type = null;
		public IndexedHashSafe<String> strings;

		public void EndInit()
		{
			if (ΔTop == null)
			{
				ΔTop = AddType(new ConsDefs(AgreeConfig.Types.TopType));
				if (ΔTop._id != TypesConfig.TopId)
					throw new TypeManagerException("TopType must receive id value 0.");
				ΔTop.m_flags = Type.Flags.TopType;
			}

			// prevent duplicate loading of the string type
			s_string_type = AgreeConfig.Types.StringType;
		}

		public Type ΔTop;
		public Type ΔString;

		readonly SlotDictionary<String, Type> dict;

		/// <summary>
		/// Create a Type from an authored constraint definition and register it under
		/// the next sequential id; binds ΔString when the configured name matches.
		/// </summary>
		public Type AddType(ConsDefs consdef)
		{
			String name = consdef.identifier.Text;

			var Δ = new Type(this.g, name);
			Δ.Initialize(consdef);
			Δ._id = dict.Count;
			dict.Add(Δ.Name, Δ);

			if (name == s_string_type)
				ΔString = Δ;
			return Δ;
		}

		public int Count { get { return dict.Count; } }

		/// <summary>
		/// Case-insensitive lookup by type name; null when absent. Assignment is not permitted.
		/// </summary>
		public Type this[String name]
		{
			get
			{
				Type t;
				if (dict != null && dict.TryGetValue(name, out t))
					return t;
				return null;
			}
			set { throw not.valid; }
		}

		public bool TryGetType(String s_typename, out Type Δout) { return dict.TryGetValue(s_typename, out Δout); }
		public bool TryGetValue(String s_typename, out Type Δout) { return dict.TryGetValue(s_typename, out Δout); }

		public ICollection<String> Keys { get { return dict.Keys; } }
		public ICollection<Type> Values { get { return dict.Values; } }

		public bool IsReadOnly { get { return false; } }

		// mutation through the generic dictionary surface is not permitted;
		// types are added only via AddType
		public void Add(String name, Type Δ) { throw not.valid; }
		public bool ContainsKey(String name) { return dict.ContainsKey(name); }
		public bool Remove(String key) { return dict.Remove(key); }
		public void Clear() { throw not.valid; }
		public void Add(KeyValuePair<String, Type> item) { throw not.valid; }
		public bool Contains(KeyValuePair<String, Type> item) { throw not.valid; }
		public bool Remove(KeyValuePair<String, Type> item) { throw not.valid; }
		public void CopyTo(KeyValuePair<String, Type>[] array, int arrayIndex) { throw not.valid; }
		public IEnumerator<KeyValuePair<String, Type>> GetEnumerator() { throw not.valid; }
		IEnumerator IEnumerable.GetEnumerator() { return GetEnumerator(); }
	};



	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/// <summary>
	/// A singly-rooted directed graph of types.
	/// </summary>
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	[ContentProperty("Types")]
	//[System.Windows.Markup.ContentWrapper(typeof(TypeGraphDict))]
	//[System.Windows.Markup.ContentWrapper(typeof(TypeGraphFinalizer))]
	public sealed partial class TypeLattice : has_grammar_base, IIdentList<Type>, _IList<Type>, IIndexedHash<Type>
	{
		public TypeLattice(IRtParent parent, Grammar g)
			: base(parent, g)
		{
			if (g != null)
				g.bcpo = this;
		}

		public TypeLattice(IRtParent parent)
			: this(parent, default(Grammar))
		{
		}

		/// <summary>
		/// The final, complete set of types, available once the grammar is loaded and the
		/// hierarchy processed. Types are in topological order from ΔTop towards the leaves,
		/// and each type's assigned id equals its index in this array.
		/// </summary>
		public Type[] type_arr;

		public alib.Collections.ReadOnly.ReadOnlyDictionary<BitArr, Type> code_dict;

		/// <summary>
		/// The number of HPSG types present before GLB types (if any) were inserted to
		/// guarantee deterministic unification; that is, the count of types actually
		/// provided by the grammarian.
		/// </summary>
		public int c_authored_types;

		public int Count
		{
			get
			{
				if (td == null)
					return -1;
				return td.Count;
			}
		}

		public Type this[int index] { get { return type_arr[index]; } }

		int IIndexedHash<Type>.this[Type t] { get { return t._id; } }

		/// <summary>
		/// Map a sequence of type ids to the corresponding Type instances.
		/// </summary>
		public Type[] ConvertTo(IEnumerable<int> items)
		{
			var result = new Type[items._Count()];
			int i = 0;
			foreach (int id in items)
				result[i++] = type_arr[id];
			return result;
		}

		int[] IIndexedHash<Type>.ConvertFrom(IEnumerable<Type> items)
		{
			var result = new int[items._Count()];
			int i = 0;
			foreach (Type Δ in items)
				result[i++] = Δ._id;
			return result;
		}

		public Type[] Types { get { return type_arr; } }

		public void MultiAdd(Object value)
		{
			set_dict((TypeGraphDict)value);
		}

		/// <summary>
		/// Accept the finalized lattice computation: record the code dictionary, order the
		/// types topologically, assign ids, and enforce the id capacity limit.
		/// </summary>
		void set_dict(TypeGraphDict result)
		{
			this.g = result.Grammar;
			g.bcpo = this;

			this.code_dict = result.dict;
			this.c_authored_types = result.c_authored_types;

			// copy final set of types to an array
			type_arr = code_dict.GetValuesArray();

			// sort the type array according to the graph partial order
			type_arr.qsort(Type.Compare.DescendantCount.Reverse);

			// assign type ids according to the partial order
			for (int i = 0; i < type_arr.Length; i++)
				type_arr[i]._id = i;

			Debug.Assert(type_arr[configuration.TypesConfig.TopId] == td.ΔTop);

			if ((uint)type_arr.Length > (uint)Edge.Flag.MultiIdMask)
			{
				String msg = String.Format("The system supports a maximum of {0} types. {1} types were specified and {2} types were needed to embed a partial order in the type hierarchy.",
								(int)Edge.Flag.MultiIdMask,
								c_authored_types,
								type_arr.Length - c_authored_types);
				throw new Exception(msg);
			}

			// diagnostic reports
			_dbg_display_type_hierarchy_info();

			various_reports();
		}

		IEnumerator IEnumerable.GetEnumerator() { return GetEnumerator(); }

		public IEnumerator<Type> GetEnumerator()
		{
			if (type_arr != null)
				return ((IEnumerable<Type>)type_arr).GetEnumerator();
			return td.Values.GetEnumerator();
		}
	};


	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/// For delivering the lattice computation result to TypeLattice
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	public sealed class TypeGraphDict : has_grammar_base
	{
		public TypeGraphDict(IRtParent parent, Grammar g, IEnumerable<KeyValuePair<BitArr, Type>> dict, int c_authored_types)
			: base(parent, g)
		{
			this.c_authored_types = c_authored_types;
			this.dict = new alib.Collections.ReadOnly.ReadOnlyDictionary<BitArr, Type>(-1, dict);
		}

		// frozen mapping from Aït-Kaci bit code to its type (authored and GLB alike)
		public readonly alib.Collections.ReadOnly.ReadOnlyDictionary<BitArr, Type> dict;

		// number of types provided by the grammarian, i.e. excluding inserted GLB types
		public readonly int c_authored_types;
	}

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	/// <summary>
	/// A singly-rooted directed graph of types.
	/// Determines the set of greatest-lower-bound types required to define a partial order.
	/// Inserts these types, updating graph edges for minimal closure.
	/// </summary>
	/// <citations>
	/// Hassan Ait-Kaci, Robert Boyer, Patrick Lincoln, Roger Nasr. 1989. "Efficient Implementation of Lattice Operations"
	/// </citations>
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	[ContentProperty("SourceDict")]
	public sealed partial class TypeGraphFinalizer : _monad<TypeDictionary>
#if AWAIT
		,System.Runtime.CompilerServices.INotifyCompletion,
		System.Runtime.CompilerServices.ICriticalNotifyCompletion
#endif
	{
		public TypeGraphFinalizer(IRtParent parent, TypeDictionary source_dict)
			: base(parent, source_dict)
		{
#if AWAIT
			this.rga = alib.Collections.Collection<Action>.None;
#endif
		}
		public TypeGraphFinalizer(IRtParent parent)
			: base(parent)
		{
		}

		/// <summary>
		/// The preliminary type dictionary (the monad payload 't0') whose hierarchy is to be
		/// closed under greatest-lower-bound.
		/// </summary>
		public TypeDictionary SourceDict
		{
			get { return this.t0; }
			set { this.t0 = value; }
		}

		/// NOTE(review): this field is never assigned within this class; ProvideValue declares
		/// a local of the same name which shadows it — confirm whether the field should be
		/// assigned there, or whether external code sets it.
		public int c_authored_types;

		///////////////////////////////////////////////////////////////////////
		/// 
#if AWAIT
		Action[] rga;
		public bool IsCompleted { get { return rga == null; } }
		public TypeLattice GetResult() { return bcpo; }
		public void OnCompleted(Action etc) { UnsafeOnCompleted(etc); }
		public TypeGraphFinalizer GetAwaiter() { return this; }
		public void UnsafeOnCompleted(Action etc)
		{
			var a = this.rga;
			do
				if (a == null)
				{
					etc();
					return;
				}
			while (a != (a = Interlocked.CompareExchange(ref this.rga, a.Append(etc), a)));
		}
		void complete_awaiters()
		{
			Action[] a;
			if (this.rga != null && (a = Interlocked.Exchange(ref this.rga, null)) != null)
				for (int i = 0; i < a.Length; i++)
					a[i]();
		}
#endif
		/// 
		///////////////////////////////////////////////////////////////////////


		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		///
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

		/// maps each finalized Aït-Kaci bit code to its type (authored and GLB alike)
		Dictionary<BitArr, Type> code_map;
		/// helper which splices a newly-created GLB type into the graph
		glb_walker walker;
		/// GLB types created during the current embed_and_close pass
		RefList<Type> ΔΔnew_glbs;
		/// current working set of candidate types for pairwise GLB testing; only the
		/// first c_work entries are valid
		Type[] ΔΔwork;
		int c_work;
		/// suffix counter for generated "glbtypeN" names
		int next_glb_num = 1;


		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// <summary>
		/// calculate a partial order embedding for the type hierarchy lattice
		/// </summary>
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		public override Object ProvideValue(IServiceProvider sp)
		{
			if (t0 == null)
				throw new Exception("'SourceDict' is required.");

			if (t0.ΔTop == null)
				throw new TdlException("'*top*' type not defined");

			if (t0.ΔString == null)
			{
				t0.ΔString = t0.AddType(new ConsDefs(t0.AgreeConfig.Types.StringType));
				//throw new TdlException("The string type '{0}' was not defined in any TDL file.", t1.AgreeConfig.Types.StringType);
			}

			/// retain the number of types prior to adding GLBs
			/// NOTE(review): this local shadows the public field of the same name — see field comment
			int c_authored_types = t0.Count;

			//check_redundant_links(types);

			code_map = new Dictionary<BitArr, Type>((int)(c_authored_types * 1.5));

			walker = new glb_walker(_set_codes_and_working_set(t0.Values));

			/// Embed the bounded complete partial order in the type graph
			embed_and_close(c_authored_types);

#if AWAIT
			/// execute continuations
			complete_awaiters();
#endif

			/// all types added; freeze the code dictionary
			/// it is now used when lazily or proactively populating the GLB cache
			return new TypeGraphDict(this, t0.Grammar, code_map, c_authored_types);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// *top* receives the all-ones code (it subsumes everything) but no bit of its own,
		/// and can be added to the code dictionary immediately.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		void finalize_top_type(int code_size)
		{
			Type Δtop = t0.ΔTop;
			Δtop.bit_num = -1;
			BitArr vec = Δtop.bit_vec = new BitArr(code_size);
			vec.SetAll();
			code_map.Add(vec, Δtop);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Determine initial set of codes for the GLB types which will be needed to close the graph.
		/// Assign Aït-Kaci bit codes to all user-specified types. This is done in two passes. At first, an arbitrary
		/// bit is assigned for the purpose of counting descendants. Final bit positions are assigned in ascending
		/// order of number of descendants.
		/// In addition to the bitarray within the types themselves, create an array which maps, via its index, each 
		/// single bit position to its type. Note: *top* no longer receives a bit of its own and accordingly is not 
		/// included in this map.
		/// todo: *top* getting no bit doesn't work if you author only a single type ('string')
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Type[] _set_codes_and_working_set(ICollection<Type> types_in)
		{
			int code_size = types_in.Count - 1;

			/// can finish with *top* here, including adding it to the code dictionary, but don't 
			/// add any other codes to the code dictionary prematurely: see warning below
			finalize_top_type(code_size);

			/// mapping used for developing the partial order of the initial set of types.
			Type[] bit_mapping = new Type[code_size];
			set_temp_codes(types_in, bit_mapping);

			/// initial working array for the actual GLB computation
			ΔΔwork = new Type[c_work];

			/// sort by number of descendants (ascending), which is the number of ones bits set in 
			/// each temporary code. having temporarily co-opted the final bit array for the 
			/// previous operation, we can therefore use the sort operation intended for final use 
			/// on our temporary bits as well.
			bit_mapping.qsort(Type.Compare.DescendantCount.Forward);

			set_final_codes(bit_mapping);

			return bit_mapping;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// load input types into the analysis array. Mark a unique, arbitrarily assigned bit in the bitarray for each 
		/// of the types' parents, creating these bit-arrays of the required code size on-the-fly.
		/// count non-*top* nodes which have more than 1 child. only such nodes need to be considered 
		/// for having the problem of a non-unique GLB. Beyond just excluding leaf nodes, non-leaf (graph-internal) 
		/// nodes with only one child also do not need to be evaluated. Since the search is N^2 in the initial set, 
		/// it is worthwhile to take this step
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		void set_temp_codes(ICollection<Type> types_in, Type[] bit_mapping)
		{
			int i = 0, code_size = bit_mapping.Length;
			foreach (Type Δ in types_in)
				if (!Δ.IsTop)
				{
					(bit_mapping[i] = Δ)._mark_ancestor_bit_codes(code_size, i);
					i++;

					if (Δ.c_children > 1)
						c_work++;
				}
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// assign final codes, according to existing array order. Assign the bit index field in the type, 
		/// but first use it to determine whether we are seeing a code for the first time in 
		/// this pass, in which case we need to erase the temporary bits. 
		/// warning: BitArr provides a value-dependent hash code, so do not add it as a dictionary key until you're 
		/// sure its bits have been finalized. since this pass is leaf-to-top, codes assigned in the outer loop are 
		/// final at the end of their iteration and can be added
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		void set_final_codes(Type[] bit_mapping)
		{
			int cur_bit = 0, i = c_work;
			foreach (Type Δ in bit_mapping)
			{
				BitArr code = Δ.reset_code_for_2nd_pass();
				code[Δ.bit_num = cur_bit++] = true;

				/// OR all descendant bits upwards into parent types
				Δ._set_parent_descendant_bits(code);

				/// prepare the working set for the GLB computation, ordered top-to-leaf
				if (Δ.c_children > 1)
					ΔΔwork[--i] = Δ;

				code_map.Add(code, Δ);
			}
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Prior to the first iteration, the work array is already in an order which is sufficient to proceed; but it is 
		/// not the fully optimal ordering. The current order considers the graph depth, but not the number of bits (i.e. 
		/// children, in descending order), because the sort pre-dates that in the load sequence. I measured whether the 
		/// benefit to the embedding phase of sorting here outweighed the cost of the operation, and it does, slightly.
		/// Iterates until a pass produces at most one new GLB (a single new GLB cannot pair with anything).
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		public void embed_and_close(int c_max)
		{
			while (true)
			{
				ΔΔnew_glbs = new RefList<Type>(c_max);

				for (int i = 0; i < c_work - 1; i++)
					find_add_glbs(ΔΔwork[i], i);

				if (ΔΔnew_glbs.Count <= 1)
					return;

				ΔΔnew_glbs.Sort(Type.Compare.DescendantCount.Reverse);
				ΔΔwork = ΔΔnew_glbs.GetUntrimmed(out c_work);
			}
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// For each type after index j in the working set: if its code intersects t0's code and the
		/// intersection is not already a known type, a GLB must be created and spliced in.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		void find_add_glbs(Type t0, int j)
		{
			Type t1;
			BitArr ba0 = t0.bit_vec, ba1, new_code;
			while (++j < c_work)
				if (ba0.FastTest(ba1 = (t1 = ΔΔwork[j]).bit_vec) && !code_map.ContainsKey(new_code = ba0.AndWithHash(ba1)))
					walker.InsertGlb(create_glb(new_code), t0, t1);
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Manufacture a new GLB type for the given (previously unseen) intersection code and
		/// record it for the next closure pass.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		Type create_glb(BitArr new_code)
		{
			Type new_type = t0.AddType(new ConsDefs("glbtype" + next_glb_num++));
			new_type.bit_vec = new_code;
			new_type.bit_num = -2;	// GLB types never own a single-bit position
			new_type.m_flags |= Type.Flags.GlbType;

			code_map.Add(new_code, new_type);
			this.ΔΔnew_glbs.Add(new_type);
			return new_type;
		}

		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		/// Splices a newly-created GLB type into the graph: finds the correct parents and
		/// children for it and rewires the affected edges.
		///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
		sealed class glb_walker
		{
			public glb_walker(Type[] bitpos2type)
			{
				this.bitpos2type = bitpos2type;
				topwards = new Type[100];
				this.leafwards = new RefList<Type>(3000);
				this.walked = new BitArr(10000);	// fix fix
				this.mbit_cov = new BitArr(bitpos2type.Length);
			}

			readonly Type[] bitpos2type;
			readonly BitArr mbit_cov, walked;
			readonly RefList<Type> leafwards;
			readonly Type[] topwards;
			int ctop;

			// state for the insertion currently in progress
			BitArr glb_code;
			Type glb;

			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			/// Reset per-insertion state, seed the parent walk with the two types whose
			/// intersection produced the GLB, and perform the insertion.
			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			public void InsertGlb(Type glb, Type Δ0, Type Δ1)
			{
				walked.ClearAll();
				mbit_cov.ClearAll();
				leafwards.SetCount(0);

				ctop = 2;
				walked[(topwards[0] = Δ0)._id] = true;
				walked[(topwards[1] = Δ1)._id] = true;

				glb_code = (this.glb = glb).bit_vec;
				_insert_glb();
			}

			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			/// bitpos2type[rgi[i]] is in 'Type.Compare.DescendantCount.Forward' order:
			///	            0 ...  c-1
			///	         leaf ...  top
			///	    fewest 1s ...  most 1s
			///	 lowest m_bit ...  highest m_bit
			///	 
			/// tricky part: we would like to bail out of adding too many candidate children to 
			/// the leafwards list. At first it seemed that if the coverage based on original bit 
			/// positions (mbit_cov) were complete and you then reached a leaf type (meaning the 
			/// remainder are also leaves), then you could stop, because leaf types, having only 
			/// one bit, have only one bit pattern permutation, which thus cannot be not-covered. 
			/// However, some of the leaf types we would be skipping may have GLB types as parents 
			/// which we do need to capture. So we will still walk up from these leaves, but we 
			/// won't add those leaves themselves
			/// pick parents first because the removal of their child edges needs to be done in a 
			/// second pass, which can piggyback on the child picking process.
			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			unsafe void _insert_glb()
			{
				int c;
				int* rgi = stackalloc int[c = glb_code.OnesCount], cur = rgi + c;
				glb_code.GetOnesPositions(rgi);

				while (--cur >= rgi)
				{
					Type child;
					walk_parents(child = bitpos2type[*cur]);

					if (mbit_cov.OnesCount == c)
						continue;

					if (child.c_children > 0)
					{
						if (mbit_cov[*cur])
							continue;
						mbit_cov.OrEq(child.bit_vec);
					}
					else if (!mbit_cov.TrySet(*cur))
						continue;

					leafwards.Add(child);
				}

				c = ctop;
				while (--c >= 0)
					glb.AddParent(topwards[c]);

				pick_children();
			}

			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			/// starting from (one of the child nodes in) the set of child nodes corresponding to each isolated bit in the new 
			/// code, move upwards in the graph gathering the distinct set of candidate children (leafwards) and candidate 
			/// parents (topwards) for the new GLB type
			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			void walk_parents(Type child)
			{
				Type par;
				var e = child.parents.GetEnumerator();
				while (e.MoveNext())
					if (!walked.TrySet((par = e.Current)._id))
						continue;
					else if (!glb_code.IsSubsumedBy(par.bit_vec))
						walk_parents(par);
					else if (substitute_better_parent(par.bit_vec))
						topwards[ctop++] = par;

				if ((child.m_flags & Instance.Flags.GlbType) != 0 && child.bit_vec.IsSubsumedBy(glb_code))
					leafwards.Add(child);
			}

			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			/// Returns false when an existing candidate parent is already more specific than 'par_code';
			/// otherwise compacts the candidate list, dropping parents made redundant by 'par_code',
			/// and returns true so the caller appends the new candidate.
			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			bool substitute_better_parent(BitArr par_code)
			{
				int src = ctop, cpo = par_code.OnesCount;
				BitArr ba;
				while (--src >= 0)
					if ((ba = topwards[src].bit_vec).OnesCount < cpo && ba.IsSubsumedBy(par_code))
						return false;

				int dst = 0;
				Type t;
				while (++src < ctop)
					if ((ba = (t = topwards[src]).bit_vec).OnesCount < cpo || !par_code.IsSubsumedBy(ba))
					{
						if (src != dst)
							topwards[dst] = t;
						dst++;
					}
				ctop = dst;
				return true;
			}

			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			/// Sorting by the number of 1s bits is the key technique here for directly obtaining a transitive
			/// closure with no redundant links. As a metric of partial order that is automatically updated as
			/// new types are added to the lattice, it is the most efficient method I've found. The earlier code
			/// used the 'm_level' field, but its calculation is graph-global and the result becomes unreliable
			/// very quickly as GLBs are discovered.
			/// After much experimentation, I've settled on least-to-most 1s order for the array of child 
			/// candidates.
			/// it so happens that if the first type in the list is a leaf, then the only non-leaf
			/// (out-of-sequence) types which can be in that list must all be GLBs (because we added
			/// from the original-bits list in top-to-bottom order, and even though those types'
			/// parents get added before the original-type itself, its parents must all have more
			/// bits than itself), and this in turn more importantly guarantees that all of the
			/// non-GLBs are leaf types.
			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			void pick_children()
			{
				if (!leafwards.IsSorted(Type.Compare.DescendantCount.Reverse))
				{
					leafwards.Sort(Type.Compare.DescendantCount.Reverse);
#if LATTICE_TESTING
					TdlLattice.c_sort++;
				}
				TdlLattice.c_tot++;
#else
				}
#endif
				int c;
				pick_children_body(leafwards.GetUntrimmed(out c), c);
			}

			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			/// From the node we are adding as a child below the new GLB, remove all edges (if any) 
			/// to the parents in their (upper) set intersection, that is, to any common parent. 
			/// For the new_child, they are all now covered by a single edge to the GLB, which 
			/// we also add.
			/// This works from both ends of the array at the same time. You must select candidates
			/// in order of most-to-fewest 1s, yet check for the types they subsume--and thus eliminate--
			/// starting with the fewest 1s (to prevent eliminating the more powerful types too early,
			/// and to eliminate more types in earlier passes). The process meets somewhere in the middle,
			/// and is complete when there are no candidates left.
			///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
			void pick_children_body(Type[] arr, int c)
			{
				int i = 0;
				do
				{
					/// Pick the best child from the candidates and add the GLB
					Type child;
					BitArr u_code = (child = arr[i++]).bit_vec;
					child.SwitchParentTo(glb);

					if (i == c)
						return;

					/// switch to a simple mode of just taking the rest when upon encountering the
					/// first leaf. all of the (remaining) candidate children are leaves
					Type t;
					int src, dst, uco;
					if ((uco = u_code.OnesCount) == 1)
					{
						do
							arr[i++].SwitchParentTo(glb);
						while (i < c);
						return;
					}

					/// remove subsumed leaf nodes by testing their one-and-only bit. This is
					/// faster than the whole gangwise bit operation.
					/// otherwise, do the full operation
					/// NOTE(review): '||' binds tighter than '?:', so this condition parses as
					/// (OnesCount > uco || c_children == 0) ? !u_code[t.bit_num] : !IsSubsumedBy(u_code).
					/// If the intent was OnesCount > uco || (leaf ? bit test : full subsume test),
					/// parentheses are missing; a non-leaf GLB has bit_num == -2, which would make
					/// the bit-test branch invalid — confirm against the lattice tests before changing.
					src = dst = i;
					do
						if ((t = arr[src]).bit_vec.OnesCount > uco ||
							t.c_children == 0 ? !u_code[t.bit_num] : !t.bit_vec.IsSubsumedBy(u_code))
						{
							if (src != dst)
								arr[dst] = t;
							dst++;
						}
					while (++src < c);
					c = dst;
				}
				while (i < c);
			}
		};
	};
}
