using System;
using System.Collections.Generic;
using System.Text;

/*
 * Heavily modified version of the code found in Martin Konicek's blog entry.
 * All props to him for his excellent version:
 *	 http://coding-time.blogspot.com/2008/03/implement-your-own-parallelfor-in-c.html
 *	
 */
namespace MetX
{
	/// <summary>
	/// A List&lt;T&gt; that knows how to run an action against every one of its
	/// elements in parallel via <see cref="Parallel.For(int,int,int,double,Action{int})"/>.
	/// </summary>
	public class ParallelList<T> : List<T>
	{
		/// <summary>
		/// Number of consecutive items each worker thread claims at a time.
		/// A chunk size of 1 causes items to be claimed (started) in order.
		/// Larger values reduce lock waiting time and thus increase parallelism.
		/// </summary>
		public int ChunkSize = 4;

		/// <summary>
		/// Multiplies the number of active thread objects per processor.
		/// Tune this per application: long operations with lots of long waits can use a
		/// higher value, while short operations with few waits should consider 1.5 or
		/// maybe 2. A multiplier between 1 and 2 (say 1.7 or 1.5) ensures slightly more
		/// threads than processors are spun up — but not too many more, given that other
		/// processes/appdomains will be doing the same thing. Call it greasing the wheel.
		/// </summary>
		public double ProcessorMultiplier = 1.5;

		/// <summary>
		/// Invokes <paramref name="ItemCode"/> once for every element of this list,
		/// spread across multiple threads based on <see cref="ChunkSize"/> and
		/// <see cref="ProcessorMultiplier"/>. Returns when all elements are processed.
		/// NOTE: elements may be processed out of order when ChunkSize &gt; 1.
		/// </summary>
		/// <param name="ItemCode">The action to run for each element.</param>
		public void ForEach(Action<T> ItemCode)
		{
			// Adapt the per-item action to the index-based Parallel.For contract.
			Action<int> byIndex = delegate(int index)
			{
				ItemCode(this[index]);
			};
			Parallel.For(0, Count, ChunkSize, ProcessorMultiplier, byIndex);
		}
	}

	/// <summary>
	/// Simplifies performing operations in parallel based on the number of processors.
	/// </summary>
	public static class Parallel
	{
		/// <summary>
		/// Default chunk size used by the overloads that do not take one explicitly.
		/// A chunk size of 1 causes items to be claimed (started) in order.
		/// Larger values reduce lock waiting time and thus increase parallelism.
		/// </summary>
		public static int DefaultChunkSize = 4;

		/// <summary>
		/// Multiplies the number of active thread objects per processor.
		/// Tune this per application: long operations with lots of long waits can use a
		/// higher value, while short operations with few waits should consider 1.5 or
		/// maybe 2. A multiplier between 1 and 2 (say 1.7 or 1.5) ensures slightly more
		/// threads than processors are spun up — but not too many more, given that other
		/// processes/appdomains will be doing the same thing. Call it greasing the wheel.
		/// </summary>
		public static double DefaultProcessorMultiplier = 1.5;

		delegate void ThreadDelegate();

		/// <summary>
		/// Parallel foreach-style loop for generic IList collections.
		/// Invokes the given Action on multiple threads based on the number of processors.
		/// NOTE: processing order is non-deterministic.
		/// Returns when the loop has finished.
		/// </summary>
		/// <param name="ToDivide">The list whose elements are processed.</param>
		/// <param name="ItemCode">The action invoked once per element.</param>
		public static void ForEach<T>(IList<T> ToDivide, Action<T> ItemCode)
		{
			For(0, ToDivide.Count, DefaultChunkSize, DefaultProcessorMultiplier, delegate(int i)
			{
				ItemCode(ToDivide[i]);
			});
		}

		/// <summary>
		/// Parallel foreach-style loop for generic IDictionary collections.
		/// Invokes the given Action on multiple threads based on the number of processors.
		/// NOTE: processing order is non-deterministic.
		/// Returns when the loop has finished.
		/// </summary>
		/// <param name="ToDivide">The dictionary whose pairs are processed.</param>
		/// <param name="ItemCode">The action invoked once per key/value pair.</param>
		public static void ForEach<K, V>(IDictionary<K, V> ToDivide, Action<KeyValuePair<K, V>> ItemCode)
		{
			object SyncRoot = new object();
			KeyValuePair<K, V> BlankItem = new KeyValuePair<K, V>();
			IEnumerator<KeyValuePair<K, V>> Advisor = ToDivide.GetEnumerator();
			// BUGFIX: dispose the enumerator even if ItemCode throws.
			try
			{
				// The index supplied by For() is ignored; each worker instead pulls the
				// next pair from the single shared enumerator under the lock.
				For(0, ToDivide.Count, DefaultChunkSize, DefaultProcessorMultiplier, delegate(int i)
				{
					bool HaveItem = false;
					KeyValuePair<K, V> CurrItem = BlankItem;
					lock (SyncRoot)
						if ((HaveItem = Advisor.MoveNext()) == true)
							CurrItem = Advisor.Current;
					if (HaveItem)
						ItemCode(CurrItem);
				});
			}
			finally
			{
				Advisor.Dispose();
			}
		}

		/// <summary>
		/// Parallel for loop for a range of numbers, using the default chunk size and
		/// processor multiplier. Invokes the given action, passing arguments
		/// fromInclusive - toExclusive, on multiple threads.
		/// Returns when the loop has finished.
		/// </summary>
		public static void For(int fromInclusive, int toExclusive, Action<int> ItemCode)
		{
			For(fromInclusive, toExclusive, DefaultChunkSize, DefaultProcessorMultiplier, ItemCode);
		}

		/// <summary>
		/// Parallel for loop. Invokes the given action, passing arguments
		/// fromInclusive - toExclusive, on multiple threads.
		/// Returns when the loop has finished. If any invocation of
		/// <paramref name="ItemCode"/> throws, the first such exception is rethrown
		/// to the caller after all workers have stopped.
		/// </summary>
		/// <param name="chunkSize">The number of consecutive indexes a worker claims at a time.
		/// A chunk size of 1 causes items to be claimed (started) in order.
		/// Larger values reduce lock waiting time and thus increase parallelism.
		/// Values below 1 fall back to <see cref="DefaultChunkSize"/>.</param>
		/// <param name="ProcessorMultiplier">Threads-per-processor multiplier; clamped
		/// to (0, 4]. Non-positive values fall back to <see cref="DefaultProcessorMultiplier"/>.</param>
		public static void For(int fromInclusive, int toExclusive, int chunkSize, double ProcessorMultiplier, Action<int> ItemCode)
		{
			if (chunkSize < 1)
				chunkSize = DefaultChunkSize;

			// BUGFIX: clamp the multiplier on BOTH sides. The original only capped it at 4;
			// a value <= 0 produced threadCount == 0 and the parallel branch silently did nothing.
			if (ProcessorMultiplier <= 0)
				ProcessorMultiplier = DefaultProcessorMultiplier;
			if (ProcessorMultiplier > 4)
				ProcessorMultiplier = 4;

			int total = toExclusive - fromInclusive;
			if (total <= 0)
				return;		// empty range: nothing to do

			// Number of process() worker threads; never fewer than 1, never more than
			// there are chunks to hand out.
			int threadCount = (int)Math.Floor((double)Environment.ProcessorCount * ProcessorMultiplier);
			if (threadCount < 1)
				threadCount = 1;
			int chunkCount = (total + chunkSize - 1) / chunkSize;
			if (threadCount > chunkCount)
				threadCount = chunkCount;

			int cnt = fromInclusive - chunkSize;
			object SyncRoot = new object();
			Exception firstError = null;	// first exception thrown by any worker

			// Processing function: repeatedly claims the next chunk under the lock and
			// calls back to the supplied Action for each index in the chunk.
			ThreadDelegate process = delegate()
			{
				try
				{
					while (true)
					{
						int cntMem;
						lock (SyncRoot)
						{
							// take next chunk
							cnt += chunkSize;
							cntMem = cnt;
						}
						// Process the chunk. Items can come out of order when chunkSize > 1.
						for (int i = cntMem; i < cntMem + chunkSize; ++i)
						{
							if (i >= toExclusive) return;
							ItemCode(i);
						}
					}
				}
				catch (Exception ex)
				{
					// Remember only the first failure; remaining workers wind down on their own.
					lock (SyncRoot)
						if (firstError == null)
							firstError = ex;
				}
			};

			// BUGFIX: the original tested (fromInclusive - chunkSize) <= chunkSize here, which
			// depends on fromInclusive rather than the size of the range — so for the common
			// fromInclusive == 0 case the loop ALWAYS ran sequentially. The intended test is
			// whether the whole range fits in a single chunk.
			if (total <= chunkSize)
			{
				// Below the chunk threshold: just process in order (efficient for small collections).
				process.Invoke();
			}
			else
			{
				// PORTABILITY FIX: Delegate.BeginInvoke/EndInvoke throw
				// PlatformNotSupportedException on .NET Core/5+, so launch plain
				// background threads and Join them instead.
				System.Threading.Thread[] workers = new System.Threading.Thread[threadCount];
				for (int i = 0; i < threadCount; ++i)
				{
					workers[i] = new System.Threading.Thread(new System.Threading.ThreadStart(process.Invoke));
					workers[i].IsBackground = true;
					workers[i].Start();
				}

				// wait for all threads to complete
				for (int i = 0; i < threadCount; ++i)
					workers[i].Join();
			}

			// Surface the first worker failure to the caller (EndInvoke used to do this).
			// NOTE: rethrowing here loses the worker's stack trace; ExceptionDispatchInfo
			// is not available on the runtimes this code targets.
			if (firstError != null)
				throw firstError;
		}
	}
}