using System;
using System.IO;
using System.Data;
using System.Threading;
using System.Diagnostics;
using System.Collections;
using System.Globalization;
using System.Collections.Generic;

/****
 * 
 * Server side will keep the table for the life time of the transport
 * Server creates a table container, whose proxy gets sent to client. (MarshalByRef)
 * The reference contains a 'Remoting plan', which, at client side, spawns N threads, 
 * collects the chunks, and then merges them back together.
 * server frees resources

 * 
 *	***	Server	*** 


 * using System.Data;
 * 
 *	public class Server : MarshalByRefObject
 *	{
 *
 *		public ITransporter GetCustomers()
 *		{
 *          DataTable dtCust = DBAdapter.Fill("Select * from Customers");
 *			return new TableTransporter(dtCust);
 *		}
 *
 * 
 *	***	Client	*** 
 	class TheClient
 * 	{
 * 		DataTable Customers
 * 		{
 * 			get
 * 			{
 * 				Server theServer = (Server)Activator.GetObject(
 *					typeof(Server), "tcp://12.342.18.81:6555/CT");
 * 				
 *				DataTable dtCust = theServer.GetCustomers().DataTable;
 * 				return dtCust;				
 * 			}
 * 		}
 * 		...
 * 	}
 * ****/


namespace ChunkTransporter
{
	#region misc
	/// <summary>
	/// object lives at client process, passed to server
	/// server can poll to see whether client had disconnected
	/// </summary>
	public class RemoteClient : MarshalByRefObject
	{ }
	public enum MARSHAL_METHOD { STRING_ARRAY, OBJECT_ARRAY, DATATABLE };
	public interface IFilter
	{
		DataTable doFilter(DataTable raw);
	}

	#endregion

	#region RemotingThread
	class RemotingThread
	{
		#region members
		Filter[] theFilters;
		DataTable mDestinationTable;
		Transporter mTransporter;
		SADataContainer Container
		{
			get { return mTransporter.mStore; }
		}
		#endregion

		internal RemotingThread(ref DataTable dt, Filter[] filters, Transporter transporter)
		{
			mDestinationTable = dt;
			theFilters = filters;
			mTransporter = transporter;
		}

		public Thread C_start()
		{
			Thread retval = new Thread(new ThreadStart(C_Fetch));
			retval.Start();
			return retval;
		}

		void C_Fetch()
		{
			if (Thread.CurrentThread.Name == null)
				Thread.CurrentThread.Name = "Remoting ChunkTransporter worker thread";
			try
			{
				//	Trace.Write(String.Format("{0}.Fetch({1}, {2})", mTable.Name, theFilters[0].firstRecord, theFilters[theFilters.Length-1].lastRecord));  
				List<DataTable> chunks = C_FetchChunks();
				Merge(chunks);
			}
			catch (Exception e)
			{
				Helper.LogException("RemotingThread.Fetch()", e);
			}
		}


		void Merge(List<DataTable> chunks)
		{
			if (chunks.Count== 0)
				return;

			lock (mDestinationTable)
			{
				System.Diagnostics.Trace.WriteLine("--> " + (DateTime.Now - DateTime.Today).ToString());
				DateTime startTime = DateTime.Now;
				//mDestinationTable.Merge(chunk, true, MissingSchemaAction.Add);
				foreach (DataTable t in chunks)
					foreach (DataRow dr in t.Rows)
						mDestinationTable.Rows.Add(dr.ItemArray);

				mTransporter.TotalProcessing += DateTime.Now - startTime;
				System.Diagnostics.Trace.WriteLine("<-- " + (DateTime.Now - DateTime.Today).ToString());

			}
		}

		DataTable C_FetchChunk(Filter filter)
		{
			DataTable dt;
			TimeSpan remotingTime, processingTime = TimeSpan.FromSeconds(0);
			DateTime startTime = DateTime.Now;

			switch (Transporter.MarshalMethod)
			{
				case (MARSHAL_METHOD.OBJECT_ARRAY):

					object[,] objArr = Container.S_ChunkObj(filter);
					remotingTime = DateTime.Now - startTime;

					startTime = DateTime.Now;
					dt = DataTableHelper.Array2DataTable(objArr);
					processingTime = DateTime.Now - startTime;
					break;
				case (MARSHAL_METHOD.DATATABLE):
					dt = Container.S_ChunkDataTable(filter);
					remotingTime = DateTime.Now - startTime;
					break;
				case (MARSHAL_METHOD.STRING_ARRAY):
				default:
					{
						string[] strArr = Container.ChunkString(filter);
						remotingTime = DateTime.Now - startTime;

						startTime = DateTime.Now;
						dt = DataTableHelper.String2DataTable(strArr);
						processingTime = DateTime.Now - startTime;
						break;
					}
			}
			mTransporter.TotalRemotingTime += remotingTime;
			mTransporter.TotalProcessing += processingTime;

			Trace.WriteLine(string.Format("{0}   {1}", mDestinationTable.TableName, filter));
			Helper.TRACE("{0}   {1}-{2}", mDestinationTable.TableName, filter.firstRecord, filter.lastRecord);

			return dt;
		}

		List<DataTable> C_FetchChunks()
		{
			List<DataTable> chunks = new List<DataTable>();
			
			Debug.Assert(theFilters.Length > 0);
			foreach (Filter f in theFilters)
			{
				DataTable snippet = C_FetchChunk(f);
				if (snippet == null || snippet.Rows.Count == 0)
					continue;

				snippet.PrimaryKey = null;

				chunks.Add(snippet);
			}
			return chunks;
		}


	}
	#endregion

	#region Data Container
	/// <summary>
	/// This object will hold a snapshot of the data to be chunked.
	/// Client will hold a reference to it.
	/// This object lives in the server.
	/// Server won't have a reference to it.  It's life span is determined by the client.
	/// </summary>
	public class SADataContainer : MarshalByRefObject, IDisposable
	{
		object mSizeBytes = new int();
		// members
		DataTable theBlog;
		string mTableName;
		DateTime mSnapshotTime = DateTime.Now;
		//static object mSyncObj = new object();
		static AutoResetEvent mEvent = new AutoResetEvent(true);
		bool mIsSynchronized = false;
		RemoteClient mRemoteClient;
		System.Threading.Timer mTimer;
		DateTime mLastClientRequest = DateTime.Now;
		object mLastClientRequestSync = new object();
		static int mCount;
		bool mIsTrim = false;
		int mClientConnectionFalures = 0;

		public static int Count
		{
			get
			{
				return mCount;
			}
		}



		/// <summary>
		/// Lives in the server, client gets proxy to this object, through which chunks are obtained.
		/// </summary>
		/// <param name="dt"></param>
		public SADataContainer(DataTable dt, bool isTrim)
		{
			mIsTrim = isTrim;
			Data = dt;

		}
		public SADataContainer(bool isSynchronized, bool isTrim)
		{
			mIsTrim = isTrim;
			mIsSynchronized = isSynchronized;
			if (mIsSynchronized)
				Lock(null);
		}
		/// <summary>
		/// Called by the Transporter to get next chunk.
		/// </summary>
		/// <param name="filter"></param>
		/// <returns></returns>
		public string[] ChunkString(Filter filter)
		{
			try
			{
				try
				{
					Helper.ThreadName = String.Format("Chunk - {0}-{1}", theBlog.TableName, this.mRemoteClient.ToString());
				}
				catch { }

				lock (mLastClientRequestSync)
					mLastClientRequest = DateTime.Now;

				// thread transport has timed out
				if (theBlog == null)
					return null;

				string[] chunks = DataTableHelper.DataRows2String(filter.S_doFilter(theBlog), mIsTrim);
				if (chunks != null)
				{
					lock (mSizeBytes)
					{
						foreach (string chunk in chunks)
							mSizeBytes = (int)mSizeBytes + chunk.Length * sizeof(System.Char);
					}
				}
				return chunks;
			}
			catch (Exception e)
			{ Helper.LogException("Chunk()", e); throw; }

		}
		public object[,] S_ChunkObj(Filter filter)
		{
			lock (mLastClientRequestSync)
				mLastClientRequest = DateTime.Now;
			Debug.Assert(theBlog != null);
			return null;//DataTableHelper.DataTable2Array(filter.S_doFilter(theBlog));
		}
		public DataTable S_ChunkDataTable(Filter filter)
		{
			try
			{
				try
				{
					Helper.ThreadName = String.Format("Chunk - {0}-{1}", theBlog.TableName, this.mRemoteClient.ToString());
				}
				catch { }

				lock (mLastClientRequestSync)
					mLastClientRequest = DateTime.Now;

				// thread transport has timed out
				if (theBlog == null)
					return null;

				DataRow[] rows = filter.S_doFilter(theBlog);
				if (rows == null)
					return null;

				if (this.theBlog == null)
				{
					Helper.Alert("Data container already destroyed! [timeout?] Can't continue!");
					throw new Exception("Client request timed out, Resources have already been freed.");
				}
				DataTable chunk = this.theBlog.Clone();
				chunk.CaseSensitive = true;
				chunk.RemotingFormat = SerializationFormat.Binary;

				chunk.PrimaryKey = null;
				chunk.Constraints.Clear();
				chunk.BeginLoadData();

				foreach (DataRow dr in rows)
					chunk.Rows.Add(dr.ItemArray); //<-- faster
				//chunk.ImportRow(dr);   <-- slower
				chunk.EndLoadData();

				return chunk;
			}
			catch (Exception e)
			{ Helper.LogException("Chunk()", e); throw; }
		}

		void SetupTimer()
		{
			// Create the delegate that invokes methods for the timer.
			TimerCallback timerDelegate = new TimerCallback(SampleClient);

			if (mTimer != null)
				mTimer.Dispose();
			// Create a timer that waits one second, then invokes every second.
			mTimer = new Timer(timerDelegate, null, 1000 * 60, 1000 * 60);
		}
		public void Register(ref RemoteClient rc)
		{
			// add remote client to collection
			Debug.Assert(mRemoteClient == null);
			mRemoteClient = rc;
			mLastClientRequest = DateTime.Now;
			SetupTimer();
		}


		internal DataTable Data
		{
			set
			{
				theBlog = value;
				mTableName = theBlog.TableName;
				Interlocked.Increment(ref mCount);
			}
		}


		Filter[] Split(DataTable dt)
		{
			int numChunks = 1 + (dt.Rows.Count / Config.ChunkSize);

			Debug.Assert(numChunks > 0);

			int current = 0;
			Filter[] filters = new Filter[numChunks];
			for (int j = 0; j < numChunks; j++)
			{
				if (current >= dt.Rows.Count)
					filters[j] = new Filter(-1, -1);
				else
					filters[j] = new Filter(current, Math.Min(current + Config.ChunkSize, dt.Rows.Count) - 1);
				current += Config.ChunkSize;
			}
			Debug.Assert(current >= dt.Rows.Count);
			return filters;
		}


		void SampleClient(object status)
		{
			Helper.ThreadName = mTableName + ".Chunk Transporter.SampleClient";

			if (mTimer != null)
				mTimer.Dispose();
			mTimer = null;
			bool isUp = true;
			try
			{
				lock (mLastClientRequestSync)
				{
					if (DateTime.Now.Subtract(mLastClientRequest) > TimeSpan.FromSeconds(Config.RemotingTimeout))
					{
						Helper.Alert("Client requests timed out after {0} seconds.  releasing resources...", Config.RemotingTimeout);
						isUp = false;
					}
				}

				try
				{
					string s = mRemoteClient.ToString();
					mClientConnectionFalures = 0;
				}
				catch (Exception e)
				{
					mClientConnectionFalures++;
					if (mClientConnectionFalures >= 3)
					{
						isUp = false;
						Helper.Alert("SampleClient() Client disconnected in transit, Cleaning Up after {0} retries... {1}", mClientConnectionFalures, e.Message);
					}


				}
				if (!isUp)
					Dispose();
			}
			finally
			{
				if (isUp)
					SetupTimer();

			}
		}

		void Lock(string clientInfo)
		{
			mEvent.WaitOne();
			Detail("Locking snapshot On behalf of {0}", clientInfo);
		}
		void Unlock(bool isGraceful)
		{
			// called by client
			if (isGraceful)
				Detail("Unlocking snapshot {0}", mTableName);
			else
				Helper.Alert("Client disconnected in transit, Unlocking snapshot ");

			mEvent.Set();
		}
		#region IDisposable Members
		private bool mIsDisposed = false;
		private bool Disposed
		{
			get
			{
				lock (this)
				{ return mIsDisposed; }
			}
		}

		public void Dispose()
		{
			lock (this)
			{
				if (Disposed)
					return;
				try
				{
					if (mTimer != null)
						mTimer.Dispose();
					mTimer = null;
					if (theBlog != null)
					{
						if (0 == (int)mSizeBytes)
							mSizeBytes = DataTableHelper.InterpolateTableSize(theBlog);
						Detail("DataContainer object destroyed for {0} Remoting time:{1}[sec] {2} records {3} bytes", theBlog.TableName, DateTime.Now.Subtract(mSnapshotTime).TotalSeconds, Helper.FormatLong(theBlog.Rows.Count), Helper.FormatLong((int)mSizeBytes));

						if (mIsSynchronized)
							Unlock(true);

						theBlog.Dispose();
						theBlog = null;
						Interlocked.Decrement(ref mCount);
					}
				}
				catch (Exception e)
				{
					Helper.LogException("DataContainer", e);
				}
				finally
				{
					mIsDisposed = true;
				}
			}
		}

		#endregion
		#region .NET Remoting
		/// <summary>
		/// This method is called by the framework.
		/// </summary>
		public override Object InitializeLifetimeService()
		{
			System.Runtime.Remoting.Lifetime.ILease lease = (System.Runtime.Remoting.Lifetime.ILease)base.InitializeLifetimeService();
			if (lease.CurrentState == System.Runtime.Remoting.Lifetime.LeaseState.Initial)
				lease.InitialLeaseTime = TimeSpan.Zero;
			return lease;
		}

		#endregion
		#region logger
		/// <exclude/>
		static protected void Detail(string format, params object[] args)
		{
			Helper.TRACE(format, args);
		}

		/// <exclude/>
		static public void TRACE(string format, params object[] args)
		{
			Helper.TRACE(format, args);
		}

		#endregion

	}
	#endregion

	#region Transporter

	public interface ITransporter : IDisposable
	{
		/// <summary>
		/// server sets the data table, client retrieves it.
		/// </summary>
		DataTable DataTable
		{
			get;
		}
	}

	[Serializable]
	public abstract class Transporter : IDisposable, ITransporter
	{
		#region members
		public TimeSpan TotalRemotingTime;
		public TimeSpan TotalProcessing;
		public TimeSpan TotalTransportTime;
		internal SADataContainer mStore;
		DataTable theTable;

		Thread[] theThreads;
		int mRowCount;
		int mNumChunks;
		int CHUNKSIZE = Config.ChunkSize;
		int mThreadCount = Config.ThreadCount;
		Transporters mTransporters;
		public static MARSHAL_METHOD MarshalMethod = MARSHAL_METHOD.DATATABLE;
		string mTableName;
		#endregion

		/// <summary>
		/// Client calls DataTable which returns the merge of all chunks.
		/// </summary>
		public DataTable DataTable
		{
			get
			{
				if (mTransporters != null)
					throw new Exception("This transporter returns a DataSet");
				if (mStore == null)
					return SmallTable;
				return LargeTable;
			}
		}

		/// <summary>
		/// Client calls DataSet which returns the merge of all chunks.
		/// </summary>
		protected System.Data.DataSet DataSet
		{
			get
			{
				if (mTransporters == null)
					throw new Exception("This transporter returns a DataTable");
				Debug.Assert(mTransporters != null);
				return mTransporters.DataSet;
			}
		}

		#region C'tor
		/// <summary>
		/// C'tor() - used by server
		/// </summary>
		/// <param name="table">the (Potentialy) large table to be diced.</param>
		public Transporter(DataTable table)
			: this(table, Config.RemotingTrim)
		{

		}
		public Transporter(DataTable table, bool isTrim)
		{
			mStore = new SADataContainer(false, isTrim);
			Init(table);
		}
		public Transporter()
		{
			mStore = new SADataContainer(this is TableTransporterSynchronized, Config.RemotingTrim);
		}
		public Transporter(System.Data.DataSet dataset)
		{
			mTransporters = new Transporters(dataset);
		}
		#endregion
		public DataTable Data
		{
			set
			{
				Init(value);
			}
		}


		#region helpers
		void Init(DataTable table)
		{
			if (table == null)
			{
				mTableName = "null";
				mNumChunks = mRowCount = mThreadCount = 0;
				mStore.Dispose();
				mStore = null;
				theTable = table;
				return;
			}
			mTableName = table.TableName;
			mRowCount = table.Rows.Count;
			
			mNumChunks = mRowCount / CHUNKSIZE;
			if (IsChunkingEnabled && isLarge(table) || this is TableTransporterSynchronized)
			{
				Debug.Assert(mStore != null);
				mStore.Data = table;
				Helper.TRACE("{0}.Remoting plan: RowCount:{1} ThreadCount:{2} NumChunks:{3} ChunkSize:{4} ",
					mTableName, Helper.FormatLong(mRowCount), mThreadCount, mNumChunks, CHUNKSIZE);
				theTable = table.Clone();
			}
			else
			{
				mStore.Dispose();
				mStore = null;
				theTable = table;
			}
		}


		void C_ExecThreads()
		{

			Debug.Assert(mThreadCount > 0);
			theThreads = new Thread[mThreadCount];
			long chunkCountPerThread = 1 + (mNumChunks / theThreads.Length);
			Debug.Assert(chunkCountPerThread > 0);

			int current = 0;
			for (int i = 0; i < theThreads.Length; i++)
			{
				Filter[] filters = new Filter[chunkCountPerThread];
				for (int j = 0; j < chunkCountPerThread; j++)
				{
					if (current >= mRowCount)
						filters[j] = new Filter(-1, -1);
					else
						filters[j] = new Filter(current, Math.Min(current + CHUNKSIZE, mRowCount) - 1);
					current += CHUNKSIZE;
				}
				RemotingThread rt = new RemotingThread(ref theTable, filters, this);
				theThreads[i] = rt.C_start();
			}
			Debug.Assert(current >= mRowCount);
		}

		void WaitForEmThreads()
		{
			try
			{
				foreach (System.Threading.Thread t in theThreads)
					t.Join();
			}
			catch (Exception e)
			{
				Debug.WriteLine(e);
			}
		}


		bool isLarge(DataTable table)
		{
			return table.Rows.Count >= Config.RemotingThresholdTableSize;
		}
		bool IsChunkingEnabled
		{
			get
			{
				return Config.ChunkingEnabled;
			}
		}

		void RegisterWithServer()
		{
			RemoteClient rc = new RemoteClient();

			try
			{
				// create an object which lives at the client, and register it with the server.
				mStore.Register(ref rc);
			}
			catch (System.Runtime.Remoting.RemotingException e)
			{
				Helper.LogException("Store.Register(ObjRef) Make sure channels are registered!", e);
				throw;
			}
			catch (Exception e)
			{
				Helper.LogException("Store.Register(ObjRef)", e);
			}
		}

		DataTable LargeTable
		{
			get
			{
				TotalRemotingTime = TotalProcessing = TotalTransportTime = TimeSpan.FromSeconds(0);
				DateTime startTime = DateTime.Now;
				try
				{
					mNumChunks = 1 + (mRowCount / CHUNKSIZE);
				
					mThreadCount = Math.Min(mThreadCount, mNumChunks);
					Debug.Assert(mThreadCount > 0);
					Debug.Assert(mNumChunks > 0);
					Debug.Assert(theTable != null);

					theTable.ExtendedProperties["SendingLocalTime"] = DateTime.Now;
					theTable.ExtendedProperties["origRowCount"] = mRowCount;
					theTable.ExtendedProperties["threadCount"] = mThreadCount;
					theTable.ExtendedProperties["numChunks"] = mNumChunks;
					theTable.ExtendedProperties["chunkSize"] = CHUNKSIZE;


					Trace.Write(String.Format("Remoting plan: OrigRowCount:{0} ThreadCount:{1} NumChunks:{2} ChunkSize:{3} Heuristic:{4}",
						mRowCount, mThreadCount, mNumChunks, CHUNKSIZE, CHUNKSIZE * mNumChunks));

					RegisterWithServer();

					theTable.BeginLoadData();

					// remove PK for speed.
					DataColumn[] thePK = theTable.PrimaryKey;
					theTable.PrimaryKey = null;
					theTable.Constraints.Clear();


					C_ExecThreads();
					System.Threading.Thread t = theThreads[0];
					WaitForEmThreads();
					try
					{
						theTable.EndLoadData();
					}
					catch (System.Data.ConstraintException)
					{
						DataTableHelper.FindNonUniques(theTable, theTable.PrimaryKey);
					}
					//reinstate PK
					theTable.PrimaryKey = thePK;

					Trace.Write(String.Format("Remoting plan: OrigRowCount:{0} ActualRowCount:{1} ThreadCount:{2} NumChunks:{3} ChunkSize:{4} for {5}",
						mRowCount, theTable.Rows.Count, mThreadCount, mNumChunks, CHUNKSIZE, mTableName));
					if (theTable.Rows.Count == 0 && mRowCount > 0 && Transporter.MarshalMethod != MARSHAL_METHOD.STRING_ARRAY)
					{
						Helper.Alert("Fetching chunk from server: server doesn't support {0} marshal method, reverting to STRING_ARRAY", Transporter.MarshalMethod);
						Transporter.MarshalMethod = MARSHAL_METHOD.STRING_ARRAY;
						return LargeTable;
					}
					Debug.Assert(mRowCount == theTable.Rows.Count, "ChunkTransporter." + theTable.TableName, "Original Row Count:" + mRowCount + "\nSynthesized Row Count:" + theTable.Rows.Count);
					return theTable;
				}
				catch (Exception e)
				{
					Helper.LogException("ChunkTransporter.LargeTable()", e);
					throw;
				}
				finally
				{
					Debug.Assert(mStore != null);
					if (mStore != null)
						mStore.Dispose();
					mStore = null;

					TotalTransportTime = DateTime.Now.Subtract(startTime);

					theTable.ExtendedProperties["Processing time"] = TotalProcessing;
					theTable.ExtendedProperties["Remoting Time"] = TotalRemotingTime;
					theTable.ExtendedProperties["Total Transport Time"] = TotalTransportTime;

					theTable.ExtendedProperties["Affective Remoting Time"] = TimeSpan.FromMilliseconds(TotalRemotingTime.TotalMilliseconds / mThreadCount);
					theTable.ExtendedProperties["Affective Processing Time"] = TimeSpan.FromMilliseconds(TotalProcessing.TotalMilliseconds / mThreadCount);

					theTable.ExtendedProperties["Marshal Method"] = MarshalMethod.ToString();
				}
			}
		}
		DataTable SmallTable
		{
			get
			{
				if (theTable != null)
				{
					theTable.ExtendedProperties.Remove("SendingTime");
					theTable.ExtendedProperties["SendingLocalTime"] = DateTime.Now;
				}
				return theTable;
			}
		}
		#endregion

		#region IDisposable Members

		private bool mIsDisposed = false;
		private bool Disposed
		{
			get
			{
				lock (this)
				{ return mIsDisposed; }
			}
		}
		public void Dispose()
		{
			lock (this)
			{
				if (!Disposed)
				{
					if (mStore != null)
						mStore.Dispose();

					mStore = null;
					theTable = null;

					theThreads = null;
				}
			}
		}

		#endregion
	}
	#endregion
 	
	#region DataTable Transporter

	/// <summary>
	/// Object created by the server, passed by value to the client, using .NET remoting 
	/// will return small tables as is, and large tables by chunks.
	/// </summary>
	[Serializable]
	public class TableTransporter : Transporter
	{
		public TableTransporter(DataTable LargeTable)
			: base(LargeTable)
		{ }
		public TableTransporter(DataTable LargeTable, bool isTrim)
			: base(LargeTable, isTrim)
		{ }

		public TableTransporter()
			: base()
		{ }

	}
	/// <summary>
	/// Two synchronized table transporters will be serialized, to minimize memory impact
	/// </summary>
	[Serializable]
	public class TableTransporterSynchronized : TableTransporter
	{
		public TableTransporterSynchronized()
			: base()
		{ }

	}

	#endregion

	#region DataSet Transpoerter
	/// <summary>
	/// The DataSetTransporter will transport each table in the dataset
	/// is returned by the .NET remoting server.
	/// </summary>
	[Serializable]
	public class DataSetTransporter : Transporter
	{
		public DataSetTransporter(System.Data.DataSet dataset)
			: base(dataset)
		{ }
		public new System.Data.DataSet DataSet
		{
			get
			{
				return base.DataSet;
			}
		}
	}
	[Serializable]
	public class Transporters
	{
		TableTransporter[] mTransporters;
		bool mIsCaseSensitive;
	
		internal Transporters(System.Data.DataSet orig)
		{
			try
			{
				mIsCaseSensitive = orig.CaseSensitive;
				//mDataSet = orig.Clone();
				mTransporters = new TableTransporter[orig.Tables.Count];
				int i = 0;
				foreach (DataTable dt in orig.Tables)
				{
					mTransporters[i++] = new TableTransporter(DataTableHelper.Select(dt, null));
				}
			}
			catch (Exception e)
			{
				Helper.LogException("Transporters", e);
				throw;
			}
		}
		public System.Data.DataSet DataSet
		{
			get
			{
				try
				{
					System.Data.DataSet ds = new System.Data.DataSet("Core rules");
					ds.CaseSensitive = mIsCaseSensitive;
					foreach (TableTransporter trans in mTransporters)
					{
						DataTable source = trans.DataTable;
						ds.Tables.Add(source);//[source.TableName]=
						trans.Dispose();
					}
					return ds;
				}
				catch (Exception e)
				{
					Helper.LogException("Transporters.DataSet", e);
					throw;
				}
			}
		}
	}

	#endregion

}

