package tetris.agent;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;

import tetris.simulator.State;
import tetris.features.*;

/**
 * One SARS transition recorded while rolling out the softmax policy:
 * the state before the move, the action index taken, the immediate
 * reward (rows cleared by that move), and the resulting state.
 * Plain data-carrier; fields are written directly by PolicyGradientAgent.
 */
class Tuple {
  public State state;      // state before the action was applied
  public State nextState;  // state after applying {@code action} to {@code state}
  public int action;       // index into state.legalMoves()
  public double reward;    // rows cleared by this single move
}


/**
 * REINFORCE-style policy-gradient agent for Tetris.
 *
 * The policy is a Boltzmann (softmax) distribution over the linear action
 * values w . phi(s'), where phi is the feature vector of the successor state.
 * {@link #learn()} repeatedly samples trajectories under the current weights,
 * estimates the policy gradient, and takes ascent steps; weights are persisted
 * to {@code filename} after every outer iteration.
 */
public class PolicyGradientAgent extends Agent {

  // Linear policy parameters, one weight per feature dimension.
  double[] weightVector;
  // Feature extractor mapping a State to its feature vector.
  Feature feature;

  // Number of trajectories sampled per gradient estimate.
  int numTrajectories;
  // Softmax temperature; larger values flatten the policy (more exploration).
  double temperature;
  // Gradient-ascent step size.
  double alpha;
  // NOTE(review): assigned in the constructor but never read in this file —
  // confirm it is unused before removing.
  double bias;

  // Path used to persist/restore the weight vector.
  String filename;

  /**
   * Creates an agent over the given feature space, restoring previously
   * saved weights from {@code filename} when that file exists.
   *
   * @param feature  feature extractor defining the policy's input space
   * @param filename path used to persist/restore the weight vector
   */
  public PolicyGradientAgent(Feature feature, String filename) {
    this.feature = feature;
    // Probe a fresh State to discover the feature dimensionality.
    int featureDim = feature.getFeatureVector(new State()).size();
    weightVector = new double[featureDim];
    this.filename = filename;

    try {
      File f = new File(filename);
      if (f.exists()) {
        System.out.println("Loading mean variance from file");
        LoadMeanVariance();
      } else {
        System.out.println("File doesnt exist - will create new");
      }
    } catch (Exception e) {
      e.printStackTrace();
    }

    numTrajectories = 10;
    temperature = 20;
    bias = 3000;
    alpha = 0.001;
  }

  /**
   * Loads the weight vector from {@link #filename}.
   * On failure the current weights are left untouched (stack trace printed).
   */
  public void LoadMeanVariance() {
    // try-with-resources guarantees the Scanner (and its file handle) is
    // closed; the original leaked it.
    try (Scanner scan = new Scanner(new File(filename))) {
      for (int i = 0; i < weightVector.length; i++) {
        weightVector[i] = scan.nextDouble();
      }
    } catch (FileNotFoundException e1) {
      e1.printStackTrace();
    }
  }

  /** Writes the weight vector to {@link #filename}, space-separated on one line. */
  public void SaveMeanVariance() {
    // try-with-resources closes the stream even if printing throws; the
    // original only closed it on the happy path.
    try (PrintStream output = new PrintStream(new File(filename))) {
      for (int i = 0; i < weightVector.length; i++) {
        output.print(weightVector[i] + " ");
      }
      output.println(" ");
    } catch (FileNotFoundException e) {
      e.printStackTrace();
    }
  }

  /**
   * Linear value of taking {@code action} in {@code state}:
   * the dot product of {@code weight} with the successor state's features.
   *
   * @param state  current state (not modified; a copy is advanced)
   * @param action index into {@code state.legalMoves()}
   * @param weight weight vector, same length as the feature vector
   * @return w . phi(successor)
   */
  protected double evaluateValue(State state, int action, double[] weight) {
    State newState = new State(state);
    newState.makeMove(action);
    ArrayList<Double> featureVector = feature.getFeatureVector(newState);
    double value = 0;
    for (int j = 0; j < featureVector.size(); j++) {
      value += weight[j] * featureVector.get(j).doubleValue();
    }
    return value;
  }

  /**
   * Samples an action from the Boltzmann (softmax) distribution over the
   * action values, i.e. P(a) proportional to exp(Q(s,a) / temperature).
   *
   * @param s      state to act in
   * @param weight policy weights
   * @return sampled action index into {@code s.legalMoves()}
   */
  protected int softMaxPolicy(State s, double[] weight) {
    int numActions = s.legalMoves().length;

    // Evaluate each action once (the original recomputed every value twice)
    // and track the maximum for numerically stable exponentiation.
    double[] values = new double[numActions];
    double maxValue = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < numActions; i++) {
      values[i] = evaluateValue(s, i, weight);
      if (values[i] > maxValue) {
        maxValue = values[i];
      }
    }

    // BUG FIX: shift by the MAX value, not the minimum. Subtracting the
    // minimum makes every exponent large and non-negative and can overflow
    // exp() to Infinity; the softmax distribution is invariant to a constant
    // shift, so subtracting the max keeps all exponents <= 0.
    double[] policy = new double[numActions];
    double Z = 0;
    for (int i = 0; i < numActions; i++) {
      policy[i] = Math.exp((values[i] - maxValue) / temperature);
      Z += policy[i];
    }
    for (int i = 0; i < numActions; i++) {
      policy[i] /= Z;
    }

    // Inverse-CDF sampling: walk the running cumulative probability until it
    // passes the uniform draw.
    double u = Math.random();
    double cumulative = 0;
    for (int i = 0; i < numActions; i++) {
      cumulative += policy[i];
      if (u <= cumulative) {
        return i;
      }
    }
    // Floating-point round-off can leave the final cumulative slightly below
    // 1; fall back to the LAST action (the original silently returned 0).
    return numActions - 1;
  }

  /**
   * Executes one policy step from {@code s} and records the transition.
   * Assumes {@code State.getRowsCleared()} is cumulative, so the difference
   * between successor and predecessor is the rows cleared by this move —
   * TODO confirm against the simulator.
   *
   * @param s      state to act from (not modified; copies are advanced)
   * @param weight policy weights used to sample the action
   * @return the recorded (state, action, reward, nextState) tuple
   */
  protected Tuple collectTuples(State s, double[] weight) {
    State s0 = new State(s);
    // Sample an action from the softmax policy at s0.
    int actToTake = softMaxPolicy(s0, weight);
    // Forward-simulate on a copy and compute the immediate reward.
    State s1 = new State(s0);
    s1.makeMove(actToTake);
    double r0 = s1.getRowsCleared() - s0.getRowsCleared();

    Tuple tupleOut = new Tuple();
    tupleOut.state = s0;
    tupleOut.action = actToTake;
    tupleOut.reward = r0;
    tupleOut.nextState = s1;
    return tupleOut;
  }

  /**
   * Rolls out one complete episode (until the game is lost) under the given
   * weights and returns the ordered list of transitions.
   *
   * @param weight policy weights to roll out
   * @return the trajectory from a fresh State to a terminal one
   */
  protected List<Tuple> generateTrajectory(double[] weight) {
    State tetrisState = new State();
    List<Tuple> trajectory = new ArrayList<Tuple>();
    while (!tetrisState.hasLost()) {
      // (original allocated a throwaway Tuple here that was immediately replaced)
      Tuple sample = collectTuples(tetrisState, weight);
      trajectory.add(sample);
      tetrisState = sample.nextState;
    }
    return trajectory;
  }

  /**
   * Rolls out {@code numTraj} independent episodes under the same weights.
   *
   * @param numTraj number of trajectories to generate
   * @param wt      policy weights to roll out
   * @return one trajectory per episode
   */
  protected List<List<Tuple>> generateMultipleTrajectory(int numTraj, double[] wt) {
    List<List<Tuple>> allTrajectories = new ArrayList<List<Tuple>>();
    for (int N = 0; N < numTraj; N++) {
      allTrajectories.add(generateTrajectory(wt));
    }
    return allTrajectories;
  }

  /**
   * Gradient of the log softmax policy with respect to the weights:
   * phi(s,a) minus the policy-weighted average of phi(s,·).
   * NOTE(review): the exact gradient carries an extra 1/temperature factor
   * that is omitted here (and in the original); it is effectively absorbed
   * into the step size {@code alpha} — confirm before reusing elsewhere.
   *
   * @param s      state the action was taken in
   * @param action action index whose log-probability is differentiated
   * @param weight current policy weights
   * @return gradient vector, same length as the feature vector
   */
  public double[] delLogPolicy(State s, int action, double[] weight) {
    State newState = new State(s);
    newState.makeMove(action);
    // Features of the chosen action's successor state.
    ArrayList<Double> fsa = feature.getFeatureVector(newState);

    int numActions = s.legalMoves().length;

    // Stable softmax weights over all actions (same max-shift fix as in
    // softMaxPolicy; the original subtracted the minimum and could overflow).
    double[] expVal = new double[numActions];
    double maxValue = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < numActions; i++) {
      expVal[i] = evaluateValue(s, i, weight);
      if (expVal[i] > maxValue) {
        maxValue = expVal[i];
      }
    }
    for (int i = 0; i < numActions; i++) {
      expVal[i] = Math.exp((expVal[i] - maxValue) / temperature);
    }

    // Accumulate sum_a exp(a) * phi(s,a) and the partition sum.
    double[] delLogTerm2 = new double[fsa.size()];
    double sumTerm = 0;
    for (int i = 0; i < numActions; i++) {
      sumTerm += expVal[i];
      State sClone = new State(s);
      sClone.makeMove(i);
      ArrayList<Double> fsaNew = feature.getFeatureVector(sClone);
      for (int j = 0; j < fsaNew.size(); j++) {
        delLogTerm2[j] += fsaNew.get(j) * expVal[i];
      }
    }

    // grad log pi(a|s) = phi(s,a) - E_pi[phi(s,·)]
    double[] delLog = new double[fsa.size()];
    for (int i = 0; i < delLog.length; i++) {
      delLog[i] = fsa.get(i) - delLogTerm2[i] / sumTerm;
    }
    return delLog;
  }

  /**
   * REINFORCE gradient estimate from a single trajectory:
   * (sum_t grad log pi(a_t|s_t)) * R, where R is the episode return
   * (total rows cleared, since per-step rewards are the rows cleared each move).
   *
   * @param traj   one complete trajectory
   * @param weight current policy weights
   * @return unnormalized gradient contribution of this trajectory
   */
  public double[] delJ(List<Tuple> traj, double[] weight) {
    double[] delJ = new double[weight.length];
    double R = 0.0;

    // Sum the log-policy gradients over the trajectory while accumulating
    // the episode return.
    for (int i = 0; i < traj.size(); i++) {
      double[] delLogP = delLogPolicy(traj.get(i).state, traj.get(i).action, weight);
      R += traj.get(i).reward;
      for (int j = 0; j < delJ.length; j++) {
        delJ[j] += delLogP[j];
      }
    }

    // Scale the summed gradient by the episode return.
    for (int j = 0; j < delJ.length; j++) {
      delJ[j] *= R;
    }
    return delJ;
  }

  /**
   * Averages the single-trajectory gradient estimates over all trajectories.
   *
   * @param allTraj trajectories sampled under {@code weight}
   * @param weight  current policy weights
   * @return Monte-Carlo estimate of the policy gradient
   */
  public double[] delJMultipleTraj(List<List<Tuple>> allTraj, double[] weight) {
    double[] delJAll = new double[weight.length];
    for (int N = 0; N < allTraj.size(); N++) {
      double[] delJ = delJ(allTraj.get(N), weight);
      for (int i = 0; i < delJAll.length; i++) {
        delJAll[i] += delJ[i] / (double) allTraj.size();
      }
    }
    return delJAll;
  }

  /**
   * Training loop (runs forever by design): sample a batch of trajectories,
   * print diagnostics, take five gradient-ascent steps reusing that batch,
   * then persist the weights.
   */
  public void learn() {
    while (true) {
      List<List<Tuple>> trainDatabase =
          generateMultipleTrajectory(numTrajectories, weightVector);

      // Average episode return across the batch (total reward per trajectory).
      double avgReward = 0;
      for (int i = 0; i < trainDatabase.size(); i++) {
        for (int j = 0; j < trainDatabase.get(i).size(); j++) {
          avgReward += trainDatabase.get(i).get(j).reward / (double) trainDatabase.size();
        }
      }
      System.out.println(avgReward);
      for (int i = 0; i < weightVector.length; i++) {
        System.out.print(weightVector[i] + " ");
      }
      System.out.println(" ");

      // Several ascent steps on the same batch. NOTE(review): after the first
      // step the batch is off-policy for the updated weights — kept as in the
      // original; confirm this reuse is intended.
      for (int counter = 0; counter < 5; counter++) {
        double[] delJ = delJMultipleTraj(trainDatabase, weightVector);
        for (int i = 0; i < delJ.length; i++) {
          weightVector[i] += alpha * delJ[i];
        }
      }
      SaveMeanVariance();
    }
  }

  /**
   * Agent interface: picks a move by sampling the current softmax policy.
   * {@code legalMoves} is unused; the State's own legal-move list is queried.
   */
  public int chooseAction(State s, int[][] legalMoves) {
    return softMaxPolicy(s, weightVector);
  }

}
