///
// * jdbm LICENSE v1.00
// *
// * Redistribution and use of this software and associated documentation
// * ("Software"), with or without modification, are permitted provided
// * that the following conditions are met:
// *
// * 1. Redistributions of source code must retain copyright
// *    statements and notices.  Redistributions must also contain a
// *    copy of this document.
// *
// * 2. Redistributions in binary form must reproduce the
// *    above copyright notice, this list of conditions and the
// *    following disclaimer in the documentation and/or other
// *    materials provided with the distribution.
// *
// * 3. The name "jdbm" must not be used to endorse or promote
// *    products derived from this Software without prior written
// *    permission of Cees de Groot.  For written permission,
// *    please contact cg@cdegroot.com.
// *
// * 4. Products derived from this Software may not be called "jdbm"
// *    nor may "jdbm" appear in their names without prior written
// *    permission of Cees de Groot.
// *
// * 5. Due credit should be given to the jdbm Project
// *    (http://jdbm.sourceforge.net/).
// *
// * THIS SOFTWARE IS PROVIDED BY THE JDBM PROJECT AND CONTRIBUTORS
// * ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
// * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
// * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
// * CEES DE GROOT OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// * OF THE POSSIBILITY OF SUCH DAMAGE.
// *
// * Copyright 2000 (C) Cees de Groot. All Rights Reserved.
// * Contributions are Copyright (C) 2000 by their associated contributors.
// *
// 

//*************************************************************************
//**  Included in JDBM 1.0 port to SharpDBM;  11/2013 Cyrus Neah cneah@codingreal.com
//**  SharpDBM is an independent reimplementation of the JDBM 1.0 software library in C#  
//*************************************************************************

using System.Runtime.CompilerServices;
using System.Collections;
using System;

namespace SharpDBM.htree
{

	using RecordManager = SharpDBM.RecordManager;

	using FastIterator = SharpDBM.helper.FastIterator;
	using IterationException = SharpDBM.helper.IterationException;
	using System.IO;
	using System.Runtime.Serialization.Formatters.Binary;
   



///
// *  Hashtable directory page.
// *
// *  @author <a href="mailto:boisvert@exoffice.com">Alex Boisvert</a>
// *  @version $Id: HashDirectory.java,v 1.5 2005/06/25 23:12:32 doomdark Exp $
// 
	[Serializable]
	public   class HashDirectory : HashNode
	{

		internal const long serialVersionUID = 1L;


//    *
//     * Maximum number of children in a directory.
//     *
//     * (Must be a power of 2 -- if you update this value, you must also
//     *  update BIT_SIZE and MAX_DEPTH.)
//     
		internal const int MAX_CHILDREN = 256;


//    *
//     * Number of significant bits per directory level.
//     
		internal const int BIT_SIZE = 8; // log2(256) = 8


//    *
//     * Maximum number of levels (zero-based)
//     *
//     * (4 * 8 bits = 32 bits, which is the size of an "int", and as
//     *  you know, hashcodes in Java are "ints")
//     
		internal const int MAX_DEPTH = 3; // 4 levels


//    *
//     * Record ids of children pages.
//     
		private long[] _children;


//    *
//     * Depth of this directory page, zero-based
//     
		private sbyte _depth;


//    *
//     * PageManager used to persist changes in directory and buckets
//     
		[System.NonSerialized]
		private RecordManager _recman;


//    *
//     * This directory's record ID in the PageManager.  (transient)
//     
		[System.NonSerialized]
		private long _recid;


//    *
//     * Public constructor used by serialization
//     
		public HashDirectory()
		{
		// empty
		}

//    *
//     * Construct a HashDirectory
//     *
//     * @param depth Depth of this directory page.
//     
		public HashDirectory(sbyte depth)
		{
			_depth = depth;
            _children = new long[MAX_CHILDREN];
		}


//    *
//     * Sets persistence context.  This method must be called before any
//     * persistence-related operation.
//     *
//     * @param recman RecordManager which stores this directory
//     * @param recid Record id of this directory.
//     
		internal virtual void setPersistenceContext(RecordManager recman, long recid)
		{
			this._recman = recman;
			this._recid = recid;
		}


//    *
//     * Get the record identifier used to load this hashtable.
//     
		internal virtual long getRecid()
		{
			return _recid;
		}


//    *
//     * Returns whether or not this directory is empty.  A directory
//     * is empty when it no longer contains buckets or sub-directories.
//     
		internal virtual bool isEmpty()
		{
			for (int i=0; i<_children.Length; i++)
			{
				if (_children[i] != 0)
				{
					return false;
				}
			}
			return true;
		}

//    *
//     * Returns the value which is associated with the given key. Returns
//     * <code>null</code> if there is not association for this key.
//     *
//     * @param key key whose associated value is to be returned
//     
 
//
		internal virtual object @get(object key)
		{
			int hash = GetHashCode(key);
			long child_recid = _children[hash];
			if (child_recid == 0)
			{
			// not bucket/page --> not found
				return null;
			}
			else
			{
				HashNode node = (HashNode) _recman.fetch(child_recid);
			// System.out.println("HashDirectory.get() child is : "+node);

				if (node is HashDirectory)
				{
				// recurse into next directory level
					HashDirectory dir = (HashDirectory) node;
					dir.setPersistenceContext(_recman, child_recid);
					return dir.get(key);
				}
				else
				{
				// node is a bucket
					HashBucket bucket = (HashBucket) node;
					return bucket.getValue(key);
				}
			}
		}


//    *
//     * Associates the specified value with the specified key.
//     *
//     * @param key key with which the specified value is to be assocated.
//     * @param value value to be associated with the specified key.
//     * @return object which was previously associated with the given key,
//     *          or <code>null</code> if no association existed.
//     
 
//
		internal virtual object put(object key, object @value)
		{
			if (@value == null)
			{
				return remove(key);
			}
			int hash = GetHashCode(key);
			long child_recid = _children[hash];
			if (child_recid == 0)
			{
			// no bucket/page here yet, let's create a bucket
				HashBucket bucket = new HashBucket(_depth+1);

			// insert (key,value) pair in bucket
				object existing = bucket.addElement(key, @value);

				long b_recid = _recman.insert(bucket);
				_children[hash] = b_recid;

				_recman.update(_recid, this);

			// System.out.println("Added: "+bucket);
				return existing;
			}
			else
			{
				HashNode node = (HashNode) _recman.fetch(child_recid);

				if (node is HashDirectory)
				{
				// recursive insert in next directory level
					HashDirectory dir = (HashDirectory) node;
					dir.setPersistenceContext(_recman, child_recid);
					return dir.put(key, @value);
				}
				else
				{
				// node is a bucket
					HashBucket bucket = (HashBucket)node;
					if (bucket.hasRoom())
					{
						object existing = bucket.addElement(key, @value);
						_recman.update(child_recid, bucket);
					// System.out.println("Added: "+bucket);
						return existing;
					}
					else
					{
					// overflow, so create a new directory
						if (_depth == MAX_DEPTH)
						{
							throw new Exception("Cannot create deeper directory. " + "Depth=" + _depth);
						}
						HashDirectory dir = new HashDirectory((sbyte)(_depth+1));
						long dir_recid = _recman.insert(dir);
						dir.setPersistenceContext(_recman, dir_recid);

						_children[hash] = dir_recid;
						_recman.update(_recid, this);

					// discard overflown bucket
						_recman.delete(child_recid);

					// migrate existing bucket elements
						ArrayList keys = bucket.getKeys();
						ArrayList values = bucket.getValues();
						int entries = keys.Count;
						for (int i=0; i<entries; i++)
						{
							dir.put(keys[i], values[i]);
						}

					// (finally!) insert new element
						return dir.put(key, @value);
					}
				}
			}
		}


//    *
//     * Remove the value which is associated with the given key.  If the
//     * key does not exist, this method simply ignores the operation.
//     *
//     * @param key key whose associated value is to be removed
//     * @return object which was associated with the given key, or
//     *          <code>null</code> if no association existed with given key.
//     
 
//
		internal virtual object remove(object key)
		{
			int hash = GetHashCode(key);
			long child_recid = _children[hash];
			if (child_recid == 0)
			{
			// not bucket/page --> not found
				return null;
			}
			else
			{
				HashNode node = (HashNode) _recman.fetch(child_recid);
			// System.out.println("HashDirectory.remove() child is : "+node);

				if (node is HashDirectory)
				{
				// recurse into next directory level
					HashDirectory dir = (HashDirectory)node;
					dir.setPersistenceContext(_recman, child_recid);
					object existing = dir.remove(key);
					if (existing != null)
					{
						if (dir.isEmpty())
						{
						// delete empty directory
							_recman.delete(child_recid);
							_children[hash] = 0;
							_recman.update(_recid, this);
						}
					}
					return existing;
						}
				else
				{
				// node is a bucket
					HashBucket bucket = (HashBucket)node;
					object existing = bucket.removeElement(key);
					if (existing != null)
					{
						if (bucket.getElementCount() >= 1)
						{
							_recman.update(child_recid, bucket);
						}
						else
						{
						// delete bucket, it's empty
							_recman.delete(child_recid);
							_children[hash] = 0;
							_recman.update(_recid, this);
						}
					}
					return existing;
				}
			}
		}

//    *
//     * Calculates the hashcode of a key, based on the current directory
//     * depth.
//     
		public  int GetHashCode(object key)
		{
			int hashMask = this.hashMask();
			int hash = key.GetHashCode();
			hash = hash & hashMask;

			hash = (int)((uint)hash >> ((MAX_DEPTH - _depth) * BIT_SIZE));
			hash = hash % MAX_CHILDREN;
//        
//        Console.WriteLine("HashDirectory.hashCode() is: 0x"
//                           +Integer.toHexString(hash)
//                           +" for object hashCode() 0x"
//                           +Integer.toHexString(key.hashCode()));
//        
			return hash;
		}

//    *
//     * Calculates the hashmask of this directory.  The hashmask is the
//     * bit mask applied to a hashcode to retain only bits that are
//     * relevant to this directory level.
//     
		internal virtual int hashMask()
		{
			int bits = MAX_CHILDREN-1;
			int hashMask = bits << ((MAX_DEPTH - _depth) * BIT_SIZE);
//        
//        Console.WriteLine("HashDirectory.hashMask() is: 0x"
//                           +Integer.toHexString(hashMask));
//        
			return hashMask;
		}

//    *
//     * Returns an enumeration of the keys contained in this
//     

		internal virtual FastIterator keys()
		{
			return new HDIterator(true,this);
		}

//    *
//     * Returns an enumeration of the values contained in this
//     

		internal virtual FastIterator values()
		{
			return new HDIterator(false,this);
		}


//    *
//     * Implement Externalizable interface
//     

		public virtual void writeExternal(FileStream @out)
		{
			@out.WriteByte(Convert.ToByte(_depth));
			BinaryFormatter bwrite = new BinaryFormatter();
			bwrite.Serialize(@out, _children);
			
		}


//    *
//     * Implement Externalizable interface
//     
		[MethodImpl(MethodImplOptions.Synchronized)]
		public virtual void readExternal(FileStream @in)
		{
			_depth = Convert.ToSByte(@in.ReadByte());
			BinaryFormatter bread = new BinaryFormatter();
			_children = (long[])bread.Deserialize(@in);
			
		}


	////////////////////////////////////////////////////////////////////////
	// INNER CLASS
	////////////////////////////////////////////////////////////////////////

//    *
//     * Utility class to enumerate keys/values in a HTree
//     
		public class HDIterator : FastIterator
		{

//        *
//         * True if we're iterating on keys, False if enumerating on values.
//         
			private bool _iterateKeys;

//        *
//         * Stacks of directories & last enumerated child position
//         
			private ArrayList _dirStack;
			private ArrayList _childStack;

//        *
//         * Current HashDirectory in the hierarchy
//         
			private HashDirectory _dir;

//        *
//         * Current child position
//         
			private int _child;

//        *
//         * Current bucket iterator
//         
			//private Iterator _iter;
			
			private IEnumerator _iter;
			

//        *
//         * Construct an iterator on this directory.
//         *
//         * @param iterateKeys True if iteration supplies keys, False
//         *                  if iterateKeys supplies values.
//         
			public HDIterator(bool iterateKeys, HashDirectory outerHashDirectory)
			{
				_dirStack = new ArrayList();
				_childStack = new ArrayList();
				_dir = outerHashDirectory;
				_child = -1;
				_iterateKeys = iterateKeys;

				prepareNext();
			}


//        *
//         * Returns the next object.
//         
			public override object next()
			{
				object next = null;
                bool hasnext = _iter.MoveNext();

                if (_iter != null && hasnext )
				{
					//next = _iter.MoveNext();
                    next = _iter.Current;
				}
				else
				{
				  try
				  {
					prepareNext();
				  }
				  catch (IOException except)
				  {
					throw new Exception(except.Message);
				  }
                  bool checkNext = _iter.MoveNext();
                  if (_iter != null && checkNext)
				  {
                      return _iter.Current; 
				  }
				}
				return next;
			}


//        *
//         * Prepare internal state so we can answer <code>hasMoreElements</code>
//         *
//         * Actually, this code prepares an Enumeration on the next
//         * Bucket to enumerate.   If no following bucket is found,
//         * the next Enumeration is set to <code>null</code>.
//         

			public  void prepareNext()
			{
				long child_recid = 0;

			// find next bucket/directory to enumerate
				do
				{
					_child++;
					if (_child >= MAX_CHILDREN)
					{

						if (_dirStack.Count == 0)
						{
						// no more directory in the stack, we're finished
							return;
						}

					// try next page
						//_dir = (HashDirectory) _dirStack.Remove(_dirStack.Count-1);
						_dir = (HashDirectory)_dirStack[_dirStack.Count - 1];
						_dirStack.RemoveAt(_dirStack.Count - 1);
						//_child = ((int) _childStack.Remove(_childStack.Count-1)).intValue();
						_child = ((int)_childStack[_childStack.Count - 1]);
						_childStack.RemoveAt(_childStack.Count - 1);
						continue;
					}
					child_recid = _dir._children[_child];
				} while (child_recid == 0);

				if (child_recid == 0)
				{
					throw new Exception("child_recid cannot be 0");
				}

				HashNode node = (HashNode) _dir._recman.fetch(child_recid);
			// Console.WriteLine("HDEnumeration.get() child is : "+node);

				if (node is HashDirectory)
				{
				// save current position
					_dirStack.Add(_dir);
					_childStack.Add(_child);

					_dir = (HashDirectory)node;
					_child = -1;

				// recurse into
					_dir.setPersistenceContext(_dir._recman, child_recid);
					prepareNext();
				}
				else
				{
				// node is a bucket
					HashBucket bucket = (HashBucket)node;
					if (_iterateKeys)
					{
						_iter = bucket.getKeys().GetEnumerator();
					}
					else
					{
						_iter = bucket.getValues().GetEnumerator();
					}
				}
			}
		}

	}


}