package tetris.agent;

import tetris.simulator.State;
import tetris.features.*;

/**
 * Online TD(0)-style learner for Tetris using a polynomial value function.
 *
 * <p>The value of a state is a degree-{@code k} polynomial in each feature:
 * V(s) = sum_i sum_j w[i][j] * f_i(s)^(j+1). Weights are updated by gradient
 * descent on the squared TD error. {@link #run()} plays episodes forever with
 * an epsilon-greedy policy whose exploration rate decays geometrically per
 * episode.
 */
public class ValueFunctionLearning implements Runnable {
  /** Current game state being played and learned on. */
  protected State tetrisState;
  /** Weights indexed as [feature index][polynomial power - 1]. */
  protected double[][] valueWeights;
  private Feature_extraction featureObj;

  private final double alpha;   // learning rate
  private final double gamma;   // discount factor
  private final double epsilon; // initial exploration probability
  private final double beta;    // per-episode exploration decay (epsilon * beta^episode)
  private final int k;          // polynomial degree of the value function

  /** Builds a learner with fixed hyperparameters and zero-initialized weights. */
  public ValueFunctionLearning() {
    alpha = 1e-7;
    gamma = 0.99;
    epsilon = 0.05;
    beta = 0.9997;
    k = 5;

    tetrisState = new State();
    // Size the weight matrix from the feature vector length of a fresh state.
    valueWeights = new double[getFeatureVector(tetrisState).length][k];
    featureObj = new Feature_extraction();
  }

  /**
   * Returns a copy of {@code inputState} after applying {@code action}.
   * The action index is clamped into the legal-move range, and no move is
   * made on an already-lost state.
   *
   * @param inputState state to simulate from (not mutated)
   * @param action     legal-move index, clamped to [0, legalMoves().length - 1]
   * @return the successor state
   */
  State forwardSimulate (State inputState, int action) {
    int move = Math.max(0, Math.min(action, inputState.legalMoves().length - 1));
    State outputState = new State(inputState);
    if (!inputState.hasLost())
      outputState.makeMove(move);
    return outputState;
  }

  /**
   * Extracts a normalized feature vector: each column height scaled by
   * (ROWS - 1), plus the next-piece id scaled by (N_PIECES - 1).
   * All entries are therefore (approximately) in [0, 1].
   *
   * @param inputState state to featurize (not mutated; a copy is taken)
   * @return feature vector of length getTop().length + 1
   */
  double[] getFeatureVector (State inputState) {
    State featureState = new State(inputState);
    double[] double_obs = new double[featureState.getTop().length + 1];
    for (int i = 0; i < featureState.getTop().length; i++)
      double_obs[i] = (double)featureState.getTop()[i]/(featureState.ROWS - 1);
    double_obs[featureState.getTop().length] = (double)featureState.getNextPiece() / (featureState.N_PIECES - 1);
    return double_obs;
  }

  /**
   * One-step reward for taking {@code action} in {@code inputState}:
   * -10 on losing, otherwise rows cleared by the move plus a small shaping
   * bonus for keeping the maximum column height low.
   */
  double getReward (State inputState, int action) {
    State outputState = forwardSimulate (inputState, action);
    if (outputState.hasLost())
      return -10;
    else
      return (outputState.getRowsCleared() - inputState.getRowsCleared()) + 0.01*(outputState.ROWS - featureObj.maxtop(outputState))/outputState.ROWS;
  }

  /**
   * Evaluates the polynomial value function on a feature vector:
   * sum over features i and powers j of w[i][j] * feature[i]^(j+1).
   */
  double valueFunction (double[] feature) {
    assert(feature.length == valueWeights.length);
    double value = 0;
    for (int i = 0; i < feature.length; i++)
      for (int j = 0; j < k; j++)
        value += valueWeights[i][j]*Math.pow(feature[i], j + 1);
    return value;
  }

  /** Value of the state reached by taking {@code action} in {@code inputState}. */
  double valueFunction(State inputState,  int action) {
    State outputState = forwardSimulate (inputState, action);
    return valueFunction(getFeatureVector(outputState));
  }

  /**
   * Gradient-descent step on the squared TD error: for each weight, subtract
   * alpha * delta * d V / d w[i][j], where the partial is feature[i]^(j+1).
   *
   * @param delta   TD error, V(s) - target
   * @param feature feature vector of the state being updated
   */
  void updateWeights(double delta, double[] feature) {
    for (int i = 0; i < feature.length; i++)
      for (int j = 0; j < k; j++)
        valueWeights[i][j] -= alpha*delta*Math.pow(feature[i], j + 1);
  }

  /**
   * Plays episodes forever: epsilon-greedy action selection (with per-episode
   * decay), a TD weight update per step, and a state reset on loss.
   * Runs until the thread is killed; there is no termination condition.
   */
  @Override
  public void run() {
    tetrisState = new State();
    int nbEpisode = 0;
    while (true) {

      double newValue;
      int chosenAction = 0;
      if (Math.random() < epsilon*Math.pow(beta, nbEpisode)) {
        // Explore: pick a uniformly random legal move.
        chosenAction = (int)(Math.random()*tetrisState.legalMoves().length);
        newValue = getReward (tetrisState, chosenAction) + gamma*valueFunction(tetrisState, chosenAction);
      } else {
        // Exploit: pick the action maximizing reward + discounted next value.
        // BUG FIX: the search previously started from 0, so when every
        // action's value was negative (e.g. all moves lose, reward -10) no
        // action was ever adopted and the TD target was silently 0. Starting
        // from negative infinity guarantees the first action's value is taken.
        newValue = Double.NEGATIVE_INFINITY;
        for (int i = 0; i < tetrisState.legalMoves().length; i++) {
          double nextValue = getReward (tetrisState, i) + gamma*valueFunction(tetrisState, i);
          if (nextValue > newValue) {
            newValue = nextValue;
            chosenAction = i;
          }
        }
      }

      // TD update: delta = V(s) - (r + gamma * V(s')).
      double[] feature = getFeatureVector(tetrisState);
      double delta = valueFunction(feature) - newValue;
      updateWeights(delta, feature);

      tetrisState = forwardSimulate(tetrisState, chosenAction);

      if (tetrisState.hasLost()) {
        System.out.println(String.format("Episode %d: %d rows cleared", nbEpisode, tetrisState.getRowsCleared()));
        tetrisState = new State();
        nbEpisode++;
      }
    }
  }
}