package tetris;

import java.util.ArrayList;

import tetris.simulator.*;
import tetris.agent.*;
import tetris.features.*;


public class Main {

  /** Milliseconds to pause between moves when a game is being visualized. */
  private static final int VISUAL_DELAY_MS = 10;

  /** Entry point. Uncomment the line for the mode you want to run. */
  public static void main(String[] args) {
    //State sFinal = runHumanPlayer();
    State sFinal = runAgent(1);
    System.out.println("You have completed "+sFinal.getRowsCleared()+" rows.");
    //learnCrossEntropy();
    //learnPolicyGradient();
    //runEvaluation(2);
    //recordVideo(2);
  }

  /**
   * Builds the 9-feature evaluation vector shared by the learning agents
   * (cross entropy and policy gradient).
   */
  private static Feature buildFullFeature() {
    ArrayList<Feature> featureSet = new ArrayList<Feature>();
    featureSet.add(new MaxColHeightFeature());
    featureSet.add(new LandingHeightFeature());
    featureSet.add(new ErodedFeature());
    featureSet.add(new RowTransitionFeature());
    featureSet.add(new ColTransitionFeature());
    featureSet.add(new HolesFeature());
    featureSet.add(new CumulativeWellsFeature());
    featureSet.add(new HoleDepthFeature());
    featureSet.add(new RowHolesFeature());
    return new ConcatenateFeature(featureSet);
  }

  /**
   * Creates the agent selected by agentID and announces the choice on stdout.
   *
   * @param agentID 1 = Dellacherie, 2 = cross entropy, 3 = policy gradient,
   *                anything else = the baseline {@code Agent}
   */
  private static Agent createAgent(int agentID) {
    switch (agentID) {
    case 1:
        System.out.println("Running DellacherieAgent");
        return new DellacherieAgent();
    case 2:
        System.out.println("Running Cross Entropy Agent");
        return new CrossEntropyAgent(buildFullFeature(), "ce5.txt");
    case 3:
        System.out.println("Running Policy Gradient Agent");
        return new PolicyGradientAgent(buildFullFeature(), "policygrad1.txt");
    default:
        System.out.println("Running default agent");
        return new Agent();
    }
  }

  /**
   * Sleeps for the given number of milliseconds. If interrupted, restores the
   * thread's interrupt flag (so callers can still observe it) and logs.
   */
  private static void sleepQuietly(int millis) {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      e.printStackTrace();
    }
  }

  /**
   * Plays one full game with the given agent, drawing each move.
   *
   * @param a          the agent choosing moves
   * @param saveFrames when true, writes "&lt;turn&gt;.png" after every move
   * @return the final (lost) game state
   */
  private static State playVisualized(Agent a, boolean saveFrames) {
    State s = new State();
    Visualizer v = new Visualizer(s);
    while (!s.hasLost()) {
      int action = a.chooseAction(s, s.legalMoves());
      s.makeMove(action);
      v.draw();
      v.drawNext(0, 0);
      if (saveFrames) {
        v.save(s.getTurnNumber() + ".png");
      }
      sleepQuietly(VISUAL_DELAY_MS);
    }
    v.dispose();
    return s;
  }

  /**
   * Runs one game with the selected agent, saving an image of the board at
   * each turn (for assembling a video).
   *
   * @param agentID agent selector; see {@link #createAgent(int)}
   * @return the final (lost) game state
   */
  public static State recordVideo(int agentID)
  {
    return playVisualized(createAgent(agentID), true);
  }

  /**
   * Runs one game with the default agent, no visualization.
   *
   * @return the final (lost) game state
   */
  public static State run()
  {
    State s = new State();
    Agent a = new Agent();

    while(!s.hasLost()) {
      s.makeMove(a.chooseAction(s,s.legalMoves()));
    }
    return s;
  }

  /**
   * Runs one visualized game with the selected agent.
   *
   * @param agentID agent selector; see {@link #createAgent(int)}
   * @return the final (lost) game state
   */
  public static State runAgent(int agentID) {
    return playVisualized(createAgent(agentID), false);
  }

  /**
   * Plays 30 headless games with the selected agent and prints the rows
   * cleared by each game, one number per line.
   *
   * @param agentID agent selector; see {@link #createAgent(int)}
   */
  public static void runEvaluation(int agentID) {
    Agent a = createAgent(agentID);

    for (int i = 0; i < 30; i++) {
      State s = new State();
      while(!s.hasLost()) {
        int action = a.chooseAction(s,s.legalMoves());
        s.makeMove(action);
      }
      System.out.println(s.getRowsCleared());
    }
  }

  //allows a human player to play using the 4 arrow keys
  // left: move the piece left
  // right: move the piece right
  // top: change orientation
  // down: drop piece
  // there is no time limit for choosing where to place the next piece
  // After every completed turn, prints the feature vector of the resulting
  // state as a comma-separated line (useful for collecting training data).
  public static State runHumanPlayer()
  {
    int delay = 100;
    State s = new State();
    Visualizer v = new Visualizer(s);
    v.draw();
    v.drawNext(0,0);

    // Built once, outside the loop: the feature objects do not depend on the
    // turn being played. NOTE(review): unlike the learning agents' set, this
    // one deliberately(?) omits MaxColHeightFeature — confirm that is intended.
    ArrayList<Feature> featureSet = new ArrayList<Feature>();
    featureSet.add(new LandingHeightFeature());
    featureSet.add(new ErodedFeature());
    featureSet.add(new RowTransitionFeature());
    featureSet.add(new ColTransitionFeature());
    featureSet.add(new HolesFeature());
    featureSet.add(new CumulativeWellsFeature());
    featureSet.add(new HoleDepthFeature());
    featureSet.add(new RowHolesFeature());
    Feature feature = new ConcatenateFeature(featureSet);

    int turnCount = 0;
    while(!s.hasLost()) {
      sleepQuietly(delay);

      // A turn has completed since the last poll: dump its feature vector.
      if (s.getTurnNumber() > turnCount) {
        turnCount++;
        for (Double vec : feature.getFeatureVector(s))
          System.out.print(vec.doubleValue()+",");
        System.out.print('\n');
      }
    }
    v.dispose();
    return s;
  }

  /** Trains the cross-entropy agent on the full feature set. */
  public static void learnCrossEntropy() {
    // NOTE(review): the cross-entropy agent writes to "policygrad2.txt" —
    // this looks like a copy-paste slip from learnPolicyGradient (the runner
    // reads "ce5.txt"); confirm the intended filename before changing it.
    CrossEntropyAgent agent = new CrossEntropyAgent(buildFullFeature(), "policygrad2.txt");
    agent.learn();
  }

  /** Trains the policy-gradient agent on the full feature set. */
  public static void learnPolicyGradient() {
    PolicyGradientAgent agent = new PolicyGradientAgent(buildFullFeature(), "policygrad2.txt");
    agent.learn();
  }
}
