package tetris.agent;

import tetris.simulator.State;

public class HillClimber 
{
	// NOTE(review): fields are public to preserve the existing interface;
	// callers elsewhere may read them directly.
	public Agent currentAgent;   // agent whose theta is mutated and evaluated
	public double[] bestTheta;   // best weight vector found so far
	public double bestScore;     // average lines cleared by bestTheta

	// Number of rollouts averaged per candidate to smooth the stochastic score.
	// (Was hard-coded as 4 in both the loop bound and the divisor.)
	private static final int EVALS_PER_CANDIDATE = 4;

	/**
	 * Creates a hill climber whose search starts from the given weights.
	 *
	 * @param initialTheta starting weight vector; assumed to have the same
	 *                     length as {@code Agent.theta} — TODO confirm
	 *                     {@code Matrix.copy} checks this
	 */
	public HillClimber(double[] initialTheta)
	{
		currentAgent = new Agent();
		bestTheta = new double[currentAgent.theta.length];
		bestScore = 0.0;
		Matrix.copy(currentAgent.theta, initialTheta);
		Matrix.copy(bestTheta, currentAgent.theta);
	}

	/**
	 * Runs stochastic hill climbing for {@code iters} iterations and returns
	 * {@link #currentAgent} configured with the best weights found.
	 *
	 * Each iteration evaluates the current weights by averaging several
	 * forward simulations, keeps them if they beat the best score (shrinking
	 * the mutation step), otherwise reverts to the best-known weights, and
	 * then applies a random perturbation for the next iteration.
	 *
	 * @param start initial game state for every rollout
	 * @param iters number of hill-climbing iterations to perform
	 * @return the agent, with its theta set to the best weights found
	 */
	public Agent hillClimb(State start, int iters)
	{
		// Fix: 10.0f / 0.99f were float literals widened to double, so the
		// scale was actually 0.98999997..., not 0.99. Use double literals.
		double mutationRate = 10.0;
		double mutationScale = 0.99;
		int maxSteps = 1000000;
		double[] delta = new double[currentAgent.theta.length];
		double[] delta2 = new double[currentAgent.theta.length];
		for(int i = 0; i < iters; i++)
		{
			// Average several rollouts to reduce evaluation noise.
			double currentLines = 0;
			for(int j = 0; j < EVALS_PER_CANDIDATE; j++)
			{
				currentAgent.forwardSimulate(start, maxSteps, 1.0);
				currentLines += currentAgent.lines;
			}
			currentLines /= EVALS_PER_CANDIDATE;

			if(currentLines > bestScore)
			{
				// Keep the improved candidate and anneal the step size.
				System.out.printf("Got a better score: %f\n", currentLines);
				bestScore = currentLines;
				Matrix.copy(bestTheta, currentAgent.theta);
				mutationRate *= mutationScale;
			}
			else
			{
				// Revert to the best-known weights before the next mutation.
				Matrix.copy(currentAgent.theta, bestTheta);
			}

			// Perturb theta by the difference of two scaled random vectors,
			// giving a roughly zero-mean step per element (assumes
			// Matrix.rand fills with non-negative values — TODO confirm).
			Matrix.rand(delta);
			Matrix.rand(delta2);
			Matrix.scale(delta, mutationRate);
			Matrix.scale(delta2, mutationRate);
			Matrix.sub(delta, delta, delta2);

			// Add the perturbation to the current theta.
			Matrix.add(currentAgent.theta, delta, currentAgent.theta);

			// Fix: report 1-based progress so the final line reads iters/iters
			// instead of (iters-1)/iters.
			System.out.printf("%d/%d\n", i + 1, iters);
		}

		// Hand back the agent carrying the best weights found.
		Matrix.copy(currentAgent.theta, bestTheta);
		return currentAgent;
	}

}
