package algorithm;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Vector;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import datastructures.WorkUnit;
import gridmath.CaseMatrixBase;
import gridmath.MatrixOperations;
import gridmath.NumberOfCombinations;
import io.OutputWriter;
import pac.SamplingThread;
import pac.SearchDatastructure;
import preprocessing.ParameterOptimizer;
import preprocessing.SimpleConstraintHandler;

/**
 * Drives a Probably-Approximately-Complete (PAC) search: builds probability
 * matrices, optimally distributes the search effort over {@link SamplingThread}s,
 * runs them on a fixed-size thread pool, and periodically reports progress and
 * projected remaining runtime.
 */
public class ProbApproxCompleteSearch {

	// Shared search state; its timing/progress counters (stage1Time, stage1Trials,
	// stage1Candidates, stage2Time) are updated concurrently by the worker threads.
	private final SearchDatastructure searcher;
	// Sink for search results; closed once the search completes.
	private final OutputWriter outputter;

	// Totals accumulated in distributeThisWork(), used for runtime projections.
	private long totalProjectedTrials = 0;
	private long totalProjectedCandidates = 0;

	/**
	 * @param I shared search data structure
	 * @param O writer that receives search output
	 * @throws IOException declared for backward compatibility with existing callers
	 */
	public ProbApproxCompleteSearch( SearchDatastructure I, OutputWriter O) throws IOException {
		searcher = I;
		outputter = O;
	}

	/**
	 * Runs the full PAC search pipeline: probability matrices, work
	 * distribution, parallel execution, and a progress report every two
	 * minutes until all sampling threads have finished.
	 *
	 * @throws Exception if any stage of the pipeline fails
	 */
	public void doPACSearch( ) throws Exception
	{
		// Calculate probability matrices.
		SimpleConstraintHandler searchFilter = new SimpleConstraintHandler ( searcher );
		CaseMatrixBase pSignificantMatrix = searchFilter.getPSignificantMatrix();
		CaseMatrixBase pLEMatrix = searchFilter.getPLEMatrix();

		NumberOfCombinations combinations = new NumberOfCombinations ( searcher );
		combinations.compute(MatrixOperations.calculateCombinations, null, true);

		searchFilter = null; // release the large filter so it can be garbage collected

		// 6. Calculate the optimal parameters.
		System.out.println("\nOptimally distributing search effort over filters (this may take a while) ... ");
		ParameterOptimizer optParams = new ParameterOptimizer(searcher);

		Vector<WorkUnit> distributedWorkSet = optParams.optimallyDistributeWork ( pSignificantMatrix, pLEMatrix, combinations );
		ArrayList<SamplingThread> threadList = distributeThisWork(distributedWorkSet);

		// 7. Start the execution.
		searcher.stage1Time = 0; // these are populated by SamplingThread
		searcher.stage2Time = 0;
		Runtime.getRuntime().gc(); // hint: reclaim preprocessing garbage before the long run

		System.out.println("CPU cores utilized = " + searcher.userParams.getMaxThreads());
		ExecutorService executor = Executors.newFixedThreadPool ( searcher.userParams.getMaxThreads() );

		// Submit threads alternating between the last and first remaining
		// entries so as to interleave memory usage patterns.
		int first = 0;
		int last = threadList.size() - 1;
		while (first < last) {
			executor.execute( threadList.get(last--) );
			executor.execute( threadList.get(first++) );
		}
		if (first == last) {
			executor.execute( threadList.get(first) ); // odd count: middle element
		}

		executor.shutdown();

		// Report progress every 2 minutes. Unlike a sleep/isTerminated()
		// busy-wait, awaitTermination returns as soon as the pool drains and
		// handles interruption properly.
		while (!executor.awaitTermination(2, TimeUnit.MINUTES)) {
			printProgressAndProjections();
		}

		outputter.Close();
	}


	/**
	 * Assigns work units to a list of executable SamplingThreads, accumulating
	 * the projected trial/candidate totals used later for runtime projections.
	 *
	 * @param distributedWorkSet work units produced by the parameter optimizer
	 * @return one ready-to-run SamplingThread per work unit, in input order
	 */
	public ArrayList<SamplingThread> distributeThisWork ( Vector<WorkUnit> distributedWorkSet )
	{
		int numberOfWorkerThreads = 0;
		ArrayList<SamplingThread> threadList = new ArrayList<SamplingThread>(distributedWorkSet.size());

		// Track the single heaviest work unit for the "worst offenders" report.
		double mostTrials = 0; double mostCandidates = 0;
		for ( WorkUnit current : distributedWorkSet )
		{
			int totalTrials = current.getTotalRequiredTrials();
			double worstExpectations = current.getWorstExpectedCombinationsFromRegion();

			mostTrials = Math.max(mostTrials, totalTrials);
			mostCandidates = Math.max(mostCandidates, worstExpectations);

			totalProjectedTrials += totalTrials;
			// Projection assumes ~half of the worst-case expected candidates
			// materialize; truncated to long (matches the original implicit cast).
			totalProjectedCandidates += (long) ( 0.5 * worstExpectations );

			threadList.add(new SamplingThread ( current, searcher, numberOfWorkerThreads, outputter ));
			numberOfWorkerThreads++;
		}

		System.out.println("\n** WORST OFFENDERS **");
		System.out.println("Memory:\n" + mostCandidates/10000);
		System.out.println("Samplings:\n" + mostTrials);
		System.out.println("** WORST OFFENDERS **\n");

		System.out.println("Running " + numberOfWorkerThreads + " search threads, " + searcher.userParams.getMaxThreads() + " at a time ... ");

		return threadList;
	}


	/**
	 * Prints amortized performance so far, the projected remaining runtime,
	 * and current heap usage. Safe to call before any trials have completed.
	 */
	public void printProgressAndProjections()
	{
		long trialsDone = searcher.stage1Trials;
		long candidatesDone = searcher.stage1Candidates;
		int maxThreads = searcher.userParams.getMaxThreads();

		// These rates correct for number of threads running in parallel.
		// Guard against division by zero: on an early tick no trial or
		// candidate may have completed yet (rate 0 => projection 0).
		double stage1rate = trialsDone == 0 ? 0
				: (double) Math.round( searcher.stage1Time*1000/trialsDone ) / ( maxThreads * 1000 );
		double stage2rate = candidatesDone == 0 ? 0
				: (double) Math.round( searcher.stage2Time*1000/candidatesDone ) / ( maxThreads * 1000 );

		System.out.println("\n***************************************");
		System.out.println("Amortized performance so far: ");
		System.out.println(stage1rate + " ms per sample");
		System.out.println("(lookup time included)");
		System.out.println();

		long remainingTrials = totalProjectedTrials - trialsDone;
		long remainingCandidates = Math.max( totalProjectedCandidates - candidatesDone, 0 );

		// Rounded to 2 decimal places, in hours (ms -> hrs is /1000/3600).
		double remainingStage1Time = (double) Math.round( (remainingTrials*stage1rate*100)/(1000*3600) ) / 100;
		double remainingStage2Time = (double) Math.round( (remainingCandidates*stage2rate*100)/(1000*3600) ) / 100;
		double spentTime = (double) Math.round ( ( (System.currentTimeMillis() - searcher.startTimeMs) * 100 ) / (1000*3600) ) / 100;

		System.out.println("Projected runtime till completion: ");
		System.out.println( (remainingStage1Time + remainingStage2Time) + " hrs remaining");
		System.out.println( spentTime + " hrs passed");
		System.out.println( (spentTime + remainingStage1Time + remainingStage2Time) + " hrs total");

		Runtime runtimeObject = Runtime.getRuntime();
		long heapUsage = runtimeObject.totalMemory() - runtimeObject.freeMemory();
		System.out.println("\nHeap usage:\n" + (float)heapUsage/1000000 + " Mb");

		System.out.println("***************************************\n");
		System.out.flush();
	}

}
