package tetris;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import tetris.simulator.*;
import tetris.agent.*;
import tetris.features.FeatureExtraction;


/**
 * Entry point for training a Tetris-playing policy with the likelihood-ratio
 * (policy-gradient) method and for demonstrating the learned policy.
 *
 * <p>Pipeline: (I) sample trajectories under the initial weights, (II) run
 * gradient-ascent cycles on the policy weights, re-sampling trajectories after
 * every cycle, (III) replay one game graphically with the learned weights, and
 * (IV) record a frame-per-turn image sequence of a played game.
 */
public class Main {

  /** Milliseconds to pause between rendered moves so the game is watchable. */
  private static final int FRAME_DELAY_MS = 500;

  /** Utility class: static entry points only, no instances. */
  private Main() {}

  public static void main(String[] args) {

    // ----------------------------------------------------------------
    // Step I: collect initial training data under the starting policy.
    // ----------------------------------------------------------------
    int numTraj = 100;
    // Initial policy weights, one entry per feature. The feature count is
    // presumably defined by FeatureExtraction — TODO confirm it matches 12.
    double[] wt = {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

    // Each trajectory is a list of (state, action, reward) tuples.
    LikelihoodRatioMethod trialRun = new LikelihoodRatioMethod();
    List<List<Tuple>> trainDatabase = trialRun.runMultipleTraj(numTraj, wt);
    System.out.println("Initial trajectories of state, action, reward tuples obtained");

    // ----------------------------------------------------------------
    // Step II: gradient ascent on the policy weights.
    // ----------------------------------------------------------------
    int trainCycles = 100;
    int horizon = 50;
    for (int trainCount = 0; trainCount < trainCycles; trainCount++) {
      for (int i = 0; i < horizon; i++) {
        // Decaying step size: alpha = 1/(i+1), restarted every cycle.
        double alpha = 1.0 / (i + 1);
        System.out.println("alpha = " + alpha);
        // Gradient of the expected return estimated from the sampled trajectories.
        double[] delJ = trialRun.delJMultipleTraj(trainDatabase, numTraj, wt);
        System.out.println("dJ = " + Arrays.toString(delJ));
        // Ascent step: wt <- wt + alpha * delJ.
        wt = ApproxFunction.sumArrays(wt, ApproxFunction.scalarProduct(delJ, alpha, true), true);
        System.out.println("Wts " + Arrays.toString(wt));
        System.out.println("iteration= " + i);
      }
      System.out.println("Train Cycle=" + trainCount);
      // Re-sample trajectories under the updated policy for the next cycle.
      trainDatabase = trialRun.runMultipleTraj(numTraj, wt);
    }

    System.out.println("Weights of policy found");

    // ----------------------------------------------------------------
    // Step III: run one visualized game using the learned weights.
    // ----------------------------------------------------------------
    State sFinal = runGraphicsLikelihoodRatioMethod(wt);
    System.out.println("You have completed " + sFinal.getRowsCleared() + " rows.");

    // ----------------------------------------------------------------
    // Step IV: record a frame-by-frame image sequence of a played game.
    // ----------------------------------------------------------------
    recordVideo();
  }

  /**
   * Sleeps for {@code millis} milliseconds. If interrupted, restores the
   * thread's interrupt flag so callers can still observe the interruption,
   * instead of swallowing the exception.
   *
   * @param millis how long to pause
   */
  private static void pause(int millis) {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Plays one game with the default {@link Agent}, drawing the board and
   * saving a PNG image of it after every move ("&lt;turn&gt;.png" in the
   * working directory). The frames can be stitched into a video externally.
   *
   * @return the terminal (lost) game state
   */
  public static State recordVideo() {
    State s = new State();
    Visualizer v = new Visualizer(s);
    Agent a = new Agent();
    while (!s.hasLost()) {
      s.makeMove(a.chooseAction(s, s.legalMoves()));
      v.draw();
      v.drawNext(0, 0);
      // One saved frame per turn, named by turn number.
      v.save(s.getTurnNumber() + ".png");
      pause(FRAME_DELAY_MS);
    }
    v.dispose();
    return s;
  }

  /**
   * Plays one game headlessly with the default {@link Agent} until it is over.
   *
   * @return the terminal (lost) game state
   */
  public static State run() {
    State s = new State();
    Agent a = new Agent();
    while (!s.hasLost()) {
      s.makeMove(a.chooseAction(s, s.legalMoves()));
    }
    return s;
  }

  /**
   * Plays and displays one game with the default {@link Agent} until it is
   * over, pausing between moves so the game is watchable.
   *
   * @return the terminal (lost) game state
   */
  public static State runGraphics() {
    State s = new State();
    Visualizer v = new Visualizer(s);
    Agent a = new Agent();
    while (!s.hasLost()) {
      s.makeMove(a.chooseAction(s, s.legalMoves()));
      v.draw();
      v.drawNext(0, 0);
      pause(FRAME_DELAY_MS);
    }
    v.dispose();
    return s;
  }

  /**
   * Plays and displays one game where the agent selects actions with the
   * likelihood-ratio policy parameterized by {@code wt}.
   *
   * @param wt learned policy weights passed to the agent's action selector
   * @return the terminal (lost) game state
   */
  public static State runGraphicsLikelihoodRatioMethod(double[] wt) {
    State s = new State();
    Visualizer v = new Visualizer(s);
    Agent a = new Agent();
    while (!s.hasLost()) {
      s.makeMove(a.chooseActionUsingLikelihoodGradient(s, s.legalMoves(), wt));
      v.draw();
      v.drawNext(0, 0);
      pause(FRAME_DELAY_MS);
    }
    v.dispose();
    return s;
  }

  /**
   * Lets a human play using the 4 arrow keys:
   * left/right move the piece, up changes orientation, down drops the piece.
   * There is no time limit for choosing where to place the next piece.
   * (Key handling is presumably wired up inside {@link Visualizer} — this
   * loop only polls the state until the game is lost.)
   *
   * @return the terminal (lost) game state
   */
  public static State runHumanPlayer() {
    State s = new State();
    Visualizer v = new Visualizer(s);
    v.draw();
    v.drawNext(0, 0);
    while (!s.hasLost()) {
      // Poll frequently so the display stays responsive to key input.
      pause(100);
    }
    v.dispose();
    return s;
  }

}

