﻿using System;
using System.Threading;
using DambachMulti.DataStructures;

namespace DambachMulti.Processing
{
	/// <summary>
	/// Static helpers that execute a for-loop body in parallel over a half-open
	/// integer range [a, b) (or (b, a] when counting down).
	/// </summary>
	public class Multi //TODO Eliminate the dependencies on Action, will make the lib compatible with all versions of the framework...
	{
		/// <summary>
		/// Parallel for over [a, b) with an implicit step of +/-1.
		/// Queues one ThreadPool work item per index; see the step overload for details.
		/// </summary>
		public static void PooledFor( int a, int b, Action<int> method )
		{
			int sign = Math.Sign( b - a );
			if( sign == 0 )
				return; //empty range: nothing to do
			PooledFor( a, b, sign, method ); //the sign doubles as the step in this case
		}

		/// <summary>
		/// A parallel for-loop using one ThreadPool user work item per iteration.<br />
		/// Equivalent to: for( int i = a; i has not passed b; i += step ) method( i );<br />
		/// Works excellently on ray tracing and other heavy loop bodies, where the
		/// per-item queueing overhead is negligible. Blocks until every iteration is done.
		/// </summary>
		/// <param name="a">Starting point (inclusive).</param>
		/// <param name="b">End point (exclusive).</param>
		/// <param name="step">The increment/decrement value; its sign must match the range direction.</param>
		/// <param name="method">The loop body, receiving the current index.</param>
		/// <exception cref="ArgumentException">step points away from b, so the loop would never complete.</exception>
		public static void PooledFor( int a, int b, int step, Action<int> method )
		{
			int sign = Math.Sign( b - a );
			if( sign != Math.Sign( step ) )
				throw new ArgumentException( "Invalid step for range.  Loop will never complete.", "step" );
			step = Math.Abs( step ); //direction preserved in sign

			//Iteration count of the equivalent sequential loop: ceil( |b-a| / step ).
			//(Plain |b-a| / step truncates and silently drops the final index whenever
			//the range is not an exact multiple of the step.)
			int totalOps = ( Math.Abs( b - a ) + step - 1 ) / step;
			if( totalOps == 0 )
				return; //nothing to do; also keeps WaitOne below from blocking forever
			int ops = totalOps;         //outstanding-iteration counter, decremented by the workers
			int trueStep = step * sign; //signed distance between consecutive indices

			using( AutoResetEvent done = new AutoResetEvent( false ) )
			{
				//i is the 0-based iteration number; it is passed as the work item's state
				//(not captured) so every item sees its own value.
				for( int i = 0; i < totalOps; i++ )
				{
					ThreadPool.QueueUserWorkItem( delegate( object param ) {
						int x = (int)param;
						method( a + ( x * trueStep ) ); //iteration x maps to index a + x*step*sign
						//Decide on the value Interlocked.Decrement returns: re-reading ops
						//afterwards is a race in which no thread may ever observe zero,
						//leaving the caller blocked forever.
						if( Interlocked.Decrement( ref ops ) == 0 )
							done.Set(); //last iteration finished - release the caller
					}, i );
				}
				done.WaitOne(); //block until the final work item signals completion
			}
		}

		/// <summary>
		/// Parallel for over [a, b) with a step of +/-1, partitioned across all processors.
		/// </summary>
		public static void For( int a, int b, Action<int> body )
		{
			int sign = Math.Sign( b - a ); //the sign doubles as the step in these cases
			if( sign == 0 )
				return; //empty range
			SmartPooledFor( a, b, sign, body, Environment.ProcessorCount );
		}

		/// <summary>
		/// Parallel for over [a, b) with the given step, partitioned across all processors.
		/// </summary>
		public static void For( int a, int b, int step, Action<int> body )
		{
			SmartPooledFor( a, b, step, body, Environment.ProcessorCount );
		}

		/// <summary>
		/// Parallel for over [a, b) with the given step and an explicit partition count.
		/// </summary>
		public static void For( int a, int b, int step, Action<int> body, int numProcessors )
		{
			SmartPooledFor( a, b, step, body, numProcessors );
		}

		/// <summary>
		/// Performs a for-loop in parallel by splitting the iteration range into
		/// numPartitions contiguous chunks, each processed by one ThreadPool work item.
		/// The remainder that does not divide evenly (always fewer than numPartitions
		/// iterations) runs on the calling thread while the pool works.
		/// The body should be checked for sequential dependencies before use.
		/// </summary>
		/// <param name="a">Starting point (inclusive).</param>
		/// <param name="b">End point (exclusive).</param>
		/// <param name="step">The increment/decrement value; its sign must match the range direction.</param>
		/// <param name="body">The body of the for loop, receiving the current index.</param>
		/// <param name="numPartitions">Number of partitions to split the work into (typically the processor count).</param>
		/// <exception cref="ArgumentException">step points away from b, so the loop would never complete.</exception>
		public static void SmartPooledFor( int a, int b, int step, Action<int> body, int numPartitions )
		{
			int sign = Math.Sign( b - a );
			if( sign != Math.Sign( step ) )
				throw new ArgumentException( "Invalid step for range.  Loop will never complete.", "step" );
			step = Math.Abs( step ); //direction preserved in sign

			int totalOps = ( Math.Abs( b - a ) + step - 1 ) / step; //iteration count: ceil( |b-a| / step )
			if( totalOps == 0 )
				return; //don't process when there is nothing to do

			int leftOut; //remainder iterations that do not divide evenly; always < numPartitions
			int partitionSize = Math.DivRem( totalOps, numPartitions, out leftOut );
			int trueStep = step * sign; //signed distance between consecutive indices
			int ops = totalOps;         //outstanding-iteration counter shared with the workers

			using( ManualResetEvent partitionedWork = new ManualResetEvent( false ) )
			{
				for( int part = 0; part < numPartitions; part++ )
				{
					ThreadPool.QueueUserWorkItem( delegate( object partNumObj ) {
						//0-based iteration number of this partition's first item; the step
						//factor lives in trueStep, so partitions are chunks of iteration
						//numbers, not raw index offsets (the old code dropped the step here).
						int first = (int)partNumObj * partitionSize;
						for( int j = 0; j < partitionSize; j++ )
						{
							body( a + ( ( first + j ) * trueStep ) ); //iteration k maps to index a + k*step*sign
							//Decide on the decrement's return value: re-reading ops afterwards
							//races with other workers and can miss the == leftOut transition
							//entirely, deadlocking the caller at WaitOne.
							if( Interlocked.Decrement( ref ops ) == leftOut )
								partitionedWork.Set(); //all partitioned work finished - release the caller
						}
					}, part );
				}

				//Run the remainder (iterations totalOps-leftOut .. totalOps-1) right here,
				//in parallel with the pool. Mapping through trueStep also fixes the old
				//descending-range bug where the leftover indices walked the wrong direction.
				for( int k = totalOps - leftOut; k < totalOps; k++ )
					body( a + ( k * trueStep ) );

				if( partitionSize > 0 )
					partitionedWork.WaitOne(); //workers decrement ops exactly down to leftOut
				//partitionSize == 0 means every iteration was remainder work handled above.
			}
		}

		/// <summary>
		/// A smart pooled for with the possibility of early exit. Ideal for multi-core
		/// searching algorithms. The range is split into numPartitions chunks plus a
		/// remainder run on the calling thread; once any invocation of exitEarly returns
		/// true, remaining work is abandoned as soon as each worker notices the stop flag.
		/// </summary>
		/// <param name="a">Starting point (inclusive).</param>
		/// <param name="b">End point (exclusive).</param>
		/// <param name="step">The increment/decrement value; its sign must match the range direction.</param>
		/// <param name="exitEarly">The body of the for loop; return true to stop execution of the remaining iterations.</param>
		/// <param name="numPartitions">Number of partitions to split the work into (typically the processor count).</param>
		/// <exception cref="ArgumentException">step points away from b, so the loop would never complete.</exception>
		public static void SmartPooledFor( int a, int b, int step, ExitEarly<int> exitEarly, int numPartitions )
		{
			int sign = Math.Sign( b - a );
			if( sign != Math.Sign( step ) )
				throw new ArgumentException( "Invalid step for range.  Loop will never complete.", "step" );
			step = Math.Abs( step ); //direction preserved in sign

			int totalOps = ( Math.Abs( b - a ) + step - 1 ) / step; //iteration count: ceil( |b-a| / step )
			if( totalOps == 0 )
				return; //don't process when there is nothing to do

			int leftOut; //remainder iterations; always < numPartitions
			int partitionSize = Math.DivRem( totalOps, numPartitions, out leftOut );
			int trueStep = step * sign;
			int ops = totalOps; //outstanding-iteration counter
			int stop = 0;       //0 = keep going, 1 = some body asked to exit early (int so Interlocked applies)

			//Deliberately NOT disposed in this overload: after an early exit the caller is
			//released while workers may still be about to Set the event; disposing here
			//would make those late signals throw ObjectDisposedException.
			ManualResetEvent partitionedWork = new ManualResetEvent( false );

			for( int part = 0; part < numPartitions; part++ )
			{
				ThreadPool.QueueUserWorkItem( delegate( object partNumObj ) {
					int first = (int)partNumObj * partitionSize; //this partition's first 0-based iteration
					for( int j = 0; j < partitionSize; j++ )
					{
						if( Thread.VolatileRead( ref stop ) != 0 )
							return; //someone already found the answer; whoever set stop also set the event
						if( exitEarly( a + ( ( first + j ) * trueStep ) ) ) //the money maker
						{
							Interlocked.Exchange( ref stop, 1 ); //tell every other worker to stand down
							partitionedWork.Set();               //release the caller immediately
							return;
						}
						//Use the decrement's return value; re-reading ops afterwards races
						//and can miss the == leftOut transition, deadlocking the caller.
						if( Interlocked.Decrement( ref ops ) == leftOut )
							partitionedWork.Set(); //every partitioned iteration ran without an early exit
					}
				}, part );
			}

			//Remainder iterations (totalOps-leftOut .. totalOps-1) run on the calling thread.
			for( int k = totalOps - leftOut; k < totalOps; k++ )
			{
				if( Thread.VolatileRead( ref stop ) != 0 )
					break; //a worker already hit the target
				if( exitEarly( a + ( k * trueStep ) ) )
				{
					Interlocked.Exchange( ref stop, 1 );
					//The original never signalled on a main-thread hit; workers that had not
					//started yet would then skip their decrements and WaitOne below deadlocked.
					partitionedWork.Set();
					break;
				}
			}

			if( partitionSize > 0 )
				partitionedWork.WaitOne(); //set either by the last decrement or by an early exit
		}

		/// <summary>
		/// Same per-iteration scheme as the step overload of PooledFor, but queued on the
		/// Dambach ProcessorPool instead of the Microsoft ThreadPool.
		/// </summary>
		/// <exception cref="ArgumentException">step points away from b, so the loop would never complete.</exception>
		[Obsolete("Do not use, bugs be in here!")]
		public static void ProcPooledFor( int a, int b, int step, Action<int> method )
		{
			int sign = Math.Sign( b - a );
			if( sign != Math.Sign( step ) )
				throw new ArgumentException( "Invalid step for range.  Loop will never complete.", "step" );
			step = Math.Abs( step ); //direction preserved in sign

			int totalOps = ( Math.Abs( b - a ) + step - 1 ) / step; //ceil( |b-a| / step )
			if( totalOps == 0 )
				return; //keeps WaitOne below from blocking on an empty range
			int ops = totalOps;
			int trueStep = step * sign;

			AutoResetEvent are = new AutoResetEvent( false );

			//i is the 0-based iteration number, passed as the work item's state.
			for( int i = 0; i < totalOps; i++ )
			{
				ProcessorPool.QueueUserWorkItem( delegate( object param ) {
					int x = (int)param;
					method( a + ( x * trueStep ) );
					//Decide on the decrement's return value, not a racy re-read of ops.
					if( Interlocked.Decrement( ref ops ) == 0 )
						are.Set();
				}, i );
			}
			are.WaitOne(); //block until the final work item signals completion
		}

		/// <summary>
		/// Partitioned parallel for over [a, b) with an implicit step of +1, queued on the
		/// Dambach ProcessorPool. Ascending ranges only; returns immediately when b &lt;= a.
		/// </summary>
		/// <param name="a">Starting point (inclusive).</param>
		/// <param name="b">End point (exclusive).</param>
		/// <param name="body">The body of the for loop, receiving the current index.</param>
		/// <param name="numProcessors">Number of partitions to split the work into.</param>
		public static void ProcPooledForWithStride( int a, int b, Action<int> body, int numProcessors )
		{
			int totalOps = b - a; //iteration count for a step of 1
			if( totalOps <= 0 )
				return; //nothing to do (this overload does not support descending ranges)

			int leftOut; //remainder that does not divide evenly; always < numProcessors
			int partitionSize = Math.DivRem( totalOps, numProcessors, out leftOut );
			int ops = totalOps; //outstanding-iteration counter shared with the workers

			ManualResetEvent partitionedWork = new ManualResetEvent( false );

			for( int i = 0; i < numProcessors; i++ )
			{
				ProcessorPool.QueueUserWorkItem( delegate( object iObj ) {
					int offset = (int)iObj * partitionSize; //only computed once per partition
					//partition p covers indices [a + p*partitionSize, a + (p+1)*partitionSize)
					for( int j = a; j < a + partitionSize; j++ )
					{
						body( offset + j ); //this is where the work gets done
						//Decide on the decrement's return value: re-reading ops afterwards
						//races and can miss the == leftOut transition, deadlocking WaitOne.
						if( Interlocked.Decrement( ref ops ) == leftOut )
							partitionedWork.Set(); //all partitioned work finished
					}
				}, i );
			}

			//Remainder [a + numProcessors*partitionSize, b) runs on the calling thread,
			//in parallel with the pool; it is always fewer than numProcessors items.
			int l = numProcessors * partitionSize + a;
			for( int i = leftOut; i > 0; i-- )
			{
				body( l );
				l++;
			}

			if( partitionSize > 0 )
				partitionedWork.WaitOne(); //workers decrement ops exactly down to leftOut
			//partitionSize == 0 means every iteration was remainder work handled above.
		}
	}
}
