package tetris.agent;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import org.apache.commons.math3.distribution.MultivariateNormalDistribution;

import tetris.features.*;
import tetris.simulator.State;
import tetris.simulator.Visualizer;

/**
 * Pairs a candidate feature-weight vector with its evaluation score.
 * Fields are public and mutable because sibling code (getElite) fills
 * them in directly and accumulates into {@code value}.
 */
class WeightValuePair {
	  // candidate feature-weight vector (w)
	  public double[] weight;
	  // evaluation score for this weight vector (e.g. average rows cleared)
	  public double value;
	}

class WeightValuePairComparator implements Comparator<WeightValuePair> {
	public int compare(WeightValuePair object1, WeightValuePair object2) {
		if (object1.value < object2.value)
			return 1;
		else
			return -1;
	}

/**
 * Cross-entropy method for learning Tetris feature weights:
 * sample weight vectors from a Gaussian, evaluate each by playing a
 * game, keep the top ("elite") fraction, and refit the distribution.
 */
public class crossEntropyRuta {

	/** Number of candidate weight vectors drawn per iteration. */
	private static final int NUM_SAMPLES = 100;
	/** Dimensionality of the weight / feature vector. */
	private static final int NUM_WEIGHTS = 12;
	/** Fraction of samples retained as the elite set. */
	private static final double ELITE_FRACTION = 0.1;

	// Most recent batch of sampled weight vectors. Kept as a field so that
	// getElite(...) can map evaluation scores back to the weights that
	// produced them (the original code discarded the samples locally and
	// then referenced undefined names in getElite).
	private double[][] currentSamples = new double[NUM_SAMPLES][NUM_WEIGHTS];

	/**
	 * Draws NUM_SAMPLES weight vectors from the distribution fitted to the
	 * current sample set and evaluates each one by playing a full game.
	 *
	 * NOTE(review): on the very first call currentSamples is all zeros, so
	 * the fitted Gaussian has a singular covariance matrix and fitting will
	 * likely fail — seed currentSamples with real initial samples first.
	 *
	 * @return per-sample evaluation scores (rows cleared), aligned
	 *         index-for-index with the sampled weights
	 */
	public double[] reSampleCurrentDist() {
		// Fit a multivariate Gaussian to the current batch of samples.
		multiVariateGaussian dist = new multiVariateGaussian();
		MultivariateNormalDistribution currentDist = dist.fitToSamples(currentSamples);

		// Draw a fresh batch from the fitted distribution, replacing the
		// previous one.
		for (int k = 0; k < NUM_SAMPLES; k++) {
			currentSamples[k] = currentDist.sample();
		}

		// Evaluate each sampled weight vector by playing one game.
		double[] evalSamples = new double[NUM_SAMPLES];
		for (int i = 0; i < NUM_SAMPLES; i++) {
			evalSamples[i] = runEval(currentSamples[i]);
		}

		return evalSamples;
	}

	/**
	 * Picks the elite samples: sorts the most recently drawn weight vectors
	 * by their evaluation score (descending) and returns the top
	 * ELITE_FRACTION of them.
	 *
	 * @param evalSamples scores produced by reSampleCurrentDist(), aligned
	 *                    index-for-index with the last sampled batch
	 * @return elite weight vectors, best first (at least one)
	 */
	public double[][] getElite(double[] evalSamples) {
		// Pair each score with the weight vector that produced it.
		List<WeightValuePair> sampleList = new ArrayList<WeightValuePair>();
		for (int i = 0; i < evalSamples.length; i++) {
			WeightValuePair pair = new WeightValuePair();
			pair.weight = currentSamples[i];
			pair.value = evalSamples[i];
			sampleList.add(pair);
		}
		// Descending by value: best samples first.
		Collections.sort(sampleList, new WeightValuePairComparator());

		// Keep the top fraction (always at least one sample).
		int numElite = Math.max(1, (int) (evalSamples.length * ELITE_FRACTION));
		double[][] elite = new double[numElite][];
		for (int i = 0; i < numElite; i++) {
			elite[i] = sampleList.get(i).weight;
		}
		return elite;
	}

	/**
	 * Plays a single game with the given weight vector, at each step taking
	 * the action that maximizes w'F, and scores it by rows cleared.
	 *
	 * (The previous version also accumulated sum of w'F per move into a
	 * local that was never used — and computed the features on the
	 * post-move state — so that dead work has been removed.)
	 *
	 * @param wt feature weights to evaluate
	 * @return total rows cleared before the game is lost
	 */
	public double runEval(double[] wt) {
		State s = new State();
		Agent a = new Agent();

		while (!s.hasLost()) {
			// Take the action which maximizes w'F.
			int actTotake = a.chooseAction_WtsFts(s, wt);
			s.makeMove(actTotake);
		}
		return s.getRowsCleared();
	}

	// TODO: refit the distribution to the mean/covariance of the elite
	// samples and iterate until convergence.

}
