﻿using System;
using System.Threading;

namespace DambachMulti.Processing
{	
	/// <summary>
	/// A simple processor-affine work pool: one worker <see cref="Core"/> per
	/// logical processor, with queued thunks distributed round-robin across cores.
	/// </summary>
	public class ProcessorPool
	{
		/// <summary>
		/// Total number of thunks queued so far. Also used (mod
		/// <see cref="CoreCount"/>) to pick the next core round-robin.
		/// Updated atomically because callers may queue concurrently.
		/// </summary>
		protected long Thunks = 0;

		/// <summary>
		/// Number of logical processors on the current machine
		/// (set from <see cref="Environment.ProcessorCount"/> in the constructor).
		/// </summary>
		protected int	CoreCount	= 1;

		// One worker per logical processor; allocated in the constructor.
		private Core[]	Cores		= null;

		/// <summary>How many thunks to wait for before executing.</summary>
		public int      WaitForNumThunks = 50;

		//static singleton state
		private static readonly object	initGate		= new object(); //guards first-time construction
		private static bool				initialized		= false;
		private static ProcessorPool	currentProcPool	= null;

		/// <summary>
		/// Gets the lazily-created singleton instance of the pool.
		/// Construction is guarded by a lock (double-checked) so that two
		/// threads racing on first access cannot build two pools — the
		/// original unguarded check-then-assign was not thread-safe.
		/// </summary>
		public static ProcessorPool Current {
			get {
				if( !ProcessorPool.initialized ) {
					lock( initGate ) {
						if( !ProcessorPool.initialized ) { //re-check under the lock
							currentProcPool				= new ProcessorPool();
							ProcessorPool.initialized	= true;
						}
					}
				}
				return currentProcPool;
			}
		}

		//constructors

		/// <summary>
		/// Creates one worker <see cref="Core"/> per logical processor;
		/// thunks are divided equally across them.
		/// </summary>
		public ProcessorPool()
		{
			this.CoreCount = Environment.ProcessorCount;
			Cores = new Core[this.CoreCount];

			// Plain i++: this loop runs on a single thread, so the original
			// Interlocked.Add( ref i, 1 ) on a local added nothing but noise.
			for( int i = 0; i < this.CoreCount; i++ )
				Cores[i] = new Core(); //they will have to wake us up when they are done
		}	//end constructor

		/// <summary>
		/// Queues a work item onto this pool. The target core is chosen
		/// round-robin from the running thunk count; if that core's queue is
		/// locked by its worker, we advance to the next core until one is free.
		/// </summary>
		/// <param name="method">Delegate to execute.</param>
		/// <param name="param">State object handed to the delegate.</param>
		public void QueueWorkItem( ThunkMethod<object> method, Object param )
		{
			// long % int fits in an int, so a direct cast replaces the
			// original (lossy-looking) double/Math.Floor round-trip.
			int		coreIndex	= (int)( Interlocked.Read( ref Thunks ) % CoreCount );
			Core	core		= Cores[ coreIndex ]; //first candidate core

			// Pre-increment so a locked core is skipped immediately; the
			// original post-increment retried the same busy core once
			// before moving on.
			while( !Monitor.TryEnter( core.Thunks ) )
				core = Cores[ ( ++coreIndex ) % CoreCount ];

			try {
				core.Thunks.Add( new Thunk<Object>( method, param ) );
			}
			finally {
				Monitor.Exit( core.Thunks ); //always release, even if Add throws
			}

			Interlocked.Increment( ref Thunks ); //atomic: Thunks++ on a long is not
		}

		//can we cache the results of these parallel operations? Only if they are deterministic and return a value
		//In which case we will need a new Pooling Mechanism to do that

		/// <summary>
		/// Emulates the static ThreadPool.QueueUserWorkItem shape against the
		/// singleton pool. Delegates to <see cref="QueueWorkItem"/> so the
		/// queueing logic exists in exactly one place (it was duplicated).
		/// </summary>
		/// <param name="method">Delegate to execute.</param>
		/// <param name="param">State object handed to the delegate.</param>
		public static void QueueUserWorkItem( ThunkMethod<object> method, Object param )
		{
			ProcessorPool.Current.QueueWorkItem( method, param );
		}

		/// <summary>
		/// Starts the processing loop on every core in the singleton pool.
		/// </summary>
		public static void Start()
		{
			foreach( Core core in ProcessorPool.Current.Cores )
				core.BeginProcessing(); //one worker per logical processor
		}

		/// <summary>
		/// Kicks each core's processing once more, then waits for all cores
		/// to finish.
		/// </summary>
		public static void Finish()
		{
			// NOTE(review): calling BeginProcessing again here mirrors the
			// original code; confirm Core.BeginProcessing is safe/intended to
			// call a second time, or whether this loop was a copy-paste from
			// Start() and only FinishProcessing belongs here.
			foreach( Core core in ProcessorPool.Current.Cores )
				core.BeginProcessing();

			foreach( Core core in ProcessorPool.Current.Cores )
				core.FinishProcessing();
		}
	}
}
