package tetris.agent;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import tetris.simulator.*;
import tetris.agent.ApproxFunction;

/**
 * Likelihood-ratio (REINFORCE-style) policy-gradient method for Tetris.
 * Rolls out trajectories under a softmax policy parameterized by a weight
 * vector, and estimates the policy gradient delJ = R * sum_t dellog pi(a_t|s_t; w).
 */
public class LikelihoodRatioMethod extends Tuple {
	protected State tetrisState;
	protected double[] weight;

	/**
	 * Samples a single {state, action, reward, nextState} tuple: draws one
	 * action from the softmax policy at s, simulates it, and scores it.
	 *
	 * @param s      current game state (copied; never mutated here)
	 * @param weight policy parameter vector
	 * @return tuple holding the pre-move state, sampled action, one-step
	 *         reward, and resulting state
	 */
	public Tuple collectTuples(State s, double[] weight){

		ApproxFunction ApxFn = new ApproxFunction();

		State s0 = new State(s);
		int[][] allowedMoves = s0.legalMoves();

		// select action based on policy for s0
		int actToTake = ApxFn.softMaxPolicy(s0, allowedMoves, weight);
		// forward simulate and get reward
		State s1 = forwardSimulate(s0, actToTake);
		double r0 = oneStepGreedyReward(s0, actToTake);

		// Save {s, a, r, s'}
		Tuple tupleOut = new Tuple();
		tupleOut.st = s0;
		// FIX: the sampled action was never recorded, so delJ() always read
		// the default value 0 for every tuple, corrupting the gradient.
		tupleOut.action = actToTake;
		tupleOut.reward = r0;
		tupleOut.nxtSt = s1;
		return tupleOut;
	}

	/**
	 * Applies the move to a copy of the input state. The input state is never
	 * mutated, and no move is made once the game has already been lost.
	 */
	State forwardSimulate (State inputState, int action) {
		State outputState = new State(inputState);
		if (!inputState.hasLost()) {
			outputState.makeMove(action);
		}
		return outputState;
	}

	/**
	 * Rolls out one full trajectory (one game) under the softmax policy.
	 *
	 * @param weight policy parameter vector
	 * @return ordered list of tuples from a fresh game until it is lost
	 */
	public List<Tuple> run(double[] weight) {
		tetrisState = new State();
		List<Tuple> trajectory = new ArrayList<Tuple>();

		while (!tetrisState.hasLost()) {
			Tuple sample = collectTuples(tetrisState, weight);
			trajectory.add(sample);
			// advance the rollout from the simulated next state
			tetrisState = sample.nxtSt;
		}

		return trajectory;
	}

	/**
	 * Rolls out {@code numTraj} independent trajectories with the same weights.
	 */
	public List<List<Tuple>> runMultipleTraj(int numTraj, double[] wt){
		List<List<Tuple>> allTrajectories = new ArrayList<List<Tuple>>();
		for (int n = 0; n < numTraj; n++) {
			allTrajectories.add(run(wt));
		}
		return allTrajectories;
	}

	/**
	 * Likelihood-ratio gradient estimate for a single trajectory:
	 * delJ = R * sum_t dellog pi(a_t | s_t; w), where R is the trajectory
	 * return (sum of per-step rewards, since reward = rows cleared per step).
	 *
	 * @param traj trajectory of sampled tuples
	 * @param w    policy parameter vector (determines gradient length)
	 * @return gradient estimate, same length as w
	 */
	public double[] delJ(List<Tuple> traj, double[] w){
		double delJ[] = new double[w.length];
		double R = 0.0;
		ApproxFunction ApxFn = new ApproxFunction();

		// Accumulate trajectory return R and the sum of score functions dellog pi.
		for (int i=0; i<traj.size(); i++){
			State sTime = traj.get(i).st;
			int aTime = traj.get(i).action;
			R += traj.get(i).reward;
			delJ = ApproxFunction.sumArrays(delJ, ApxFn.delLogPolicy(sTime,sTime.legalMoves(), aTime, w), true);
		}

		// Scale the summed score functions by the trajectory return.
		delJ = ApproxFunction.scalarProduct(delJ, R, true);
		System.out.println("R= "+R +"and delJ " + Arrays.toString(delJ));
		return delJ;
	}

	/**
	 * Averages the per-trajectory gradient over {@code numTraj} trajectories.
	 */
	public double[] delJMultipleTraj(List<List<Tuple>> allTraj, int numTraj, double[] wt){
		double[] delJAll = new double[wt.length];

		for (int n = 0; n < numTraj; n++) {
			delJAll = ApproxFunction.sumArrays(delJAll, delJ(allTraj.get(n), wt), true);
		}

		// Divide the summed gradient by N. NOTE(review): the 'false' flag
		// presumably selects division in scalarProduct — confirm against
		// ApproxFunction, which is not visible here.
		delJAll = ApproxFunction.scalarProduct(delJAll, numTraj, false);
		return delJAll;
	}

	/**
	 * Shaped one-step reward: simulates the move on a copy of s and returns
	 * 0.9 * (State.ROWS - tallest column), i.e. rewards keeping the stack low.
	 * Assumes getTop() holds per-column heights — TODO confirm in State.
	 */
	private double oneStepGreedyReward (State s, int action){
		State newState = new State(s);
		newState.makeMove(action);

		// Hoist getTop() out of the loop instead of calling it per iteration.
		int[] top = newState.getTop();
		int maxTop = 0;
		for (int i = 0; i < top.length; i++) {
			if (top[i] > maxTop) {
				maxTop = top[i];
			}
		}
		double metricHeight = State.ROWS - maxTop;

		// The previous covered-area metric was computed but unused (dead code);
		// the reward is the height metric alone, scaled as before.
		return 0.9*metricHeight;
	}
}
