package tetris.rlpark_wrapper;

import rlpark.plugin.rltoys.envio.actions.Action;
import rlpark.plugin.rltoys.envio.actions.ActionArray;
import rlpark.plugin.rltoys.envio.observations.Legend;
import rlpark.plugin.rltoys.envio.rl.TRStep;
import rlpark.plugin.rltoys.math.ranges.Range;
import rlpark.plugin.rltoys.problems.ProblemBounded;
import rlpark.plugin.rltoys.problems.ProblemDiscreteAction;
import tetris.simulator.State;

/**
 * RLPark adapter around the Tetris simulator {@link State}.
 *
 * <p>Observations are hard-coded (FIXME) as the per-column heights followed by
 * the id of the next piece. The reward is hard-coded (FIXME) as the number of
 * free rows above the tallest column, with a large penalty on losing.
 */
public class TetrisProblem implements ProblemDiscreteAction, ProblemBounded {
  /** Reward emitted on the transition into a lost game. */
  private static final double LOSS_REWARD = -10000.0;

  // Non-conventional capitalized name kept: the field is protected and may be
  // referenced by subclasses.
  protected final Action[] Actions;
  public static final Legend legend = new Legend();
  private TRStep step;
  protected State tetrisState;

  // legalMoves is read through the instance below; presumably a static table
  // in State — the suppression silences that warning. TODO confirm.
  @SuppressWarnings("static-access")
  public TetrisProblem() {
    tetrisState = new State();
    // Size the action set for the piece with the most legal moves; update()
    // clamps indices that are out of range for the current piece.
    int maxMoves = 0;
    for (int i = 0; i < State.N_PIECES; i++)
      maxMoves = Math.max(maxMoves, tetrisState.legalMoves[i].length);
    Actions = new Action[maxMoves];
    for (int i = 0; i < maxMoves; i++)
      Actions[i] = new ActionArray(i);
  }

  /**
   * Applies {@code action} to the simulator. The action value is clamped into
   * the range of legal moves for the current piece; nothing happens once the
   * game has been lost.
   */
  protected void update(ActionArray action) {
    int requested = (int) ActionArray.toDouble(action);
    int move = Math.max(0, Math.min(requested, tetrisState.legalMoves().length - 1));
    if (!tetrisState.hasLost())
      tetrisState.makeMove(move);
  }

  /**
   * Builds the observation vector: every column height from getTop(), then
   * the next-piece id. FIXME: hard-coded until a proper feature class exists.
   */
  private double[] makeObservation() {
    int[] top = tetrisState.getTop();
    double[] obs = new double[top.length + 1];
    for (int i = 0; i < top.length; i++)
      obs[i] = top[i];
    obs[top.length] = tetrisState.getNextPiece();
    return obs;
  }

  /**
   * Hard-coded reward (FIXME): ROWS - max(top), i.e. the number of free rows
   * above the tallest column. The originally intended reward was the
   * rows-cleared delta, which would need a copy of the pre-move state.
   */
  private double computeReward() {
    double reward = State.ROWS;
    for (int height : tetrisState.getTop())
      reward = Math.min(reward, State.ROWS - height);
    return reward;
  }

  @Override
  public TRStep step(Action action) {
    update((ActionArray) action);
    double[] obs = makeObservation();
    if (!tetrisState.hasLost()) {
      step = new TRStep(step, action, obs, computeReward());
    } else {
      // Terminal transition: large penalty, then mark the episode as ended.
      step = new TRStep(step, action, obs, LOSS_REWARD);
      forceEndEpisode();
    }
    return step;
  }

  @Override
  public TRStep forceEndEpisode() {
    step = step.createEndingStep();
    return step;
  }

  @Override
  public TRStep initialize() {
    // Fresh simulator for the new episode.
    tetrisState = new State();
    // NOTE(review): -1 is what the original passed as the initial step's
    // second argument; presumably a time/reward placeholder — confirm against
    // TRStep's constructor.
    step = new TRStep(makeObservation(), -1);
    return step;
  }

  @Override
  public Action[] actions() {
    return Actions;
  }

  @Override
  public TRStep lastStep() {
    return step;
  }

  /** Returns the shared (still empty) legend. */
  public Legend legend() {
    return legend;
  }

  /**
   * Per-component observation sizes: State.ROWS for each column height and
   * State.N_PIECES for the next-piece id.
   */
  public int[] getObservationSize() {
    int columns = tetrisState.getTop().length;
    int[] sizes = new int[columns + 1];
    for (int i = 0; i < columns; i++)
      sizes[i] = State.ROWS;
    sizes[columns] = State.N_PIECES;
    return sizes;
  }

  /**
   * Per-component observation ranges: [0, ROWS] for each column height and
   * [0, N_PIECES] for the next-piece id.
   */
  public Range[] getObservationRanges() {
    int columns = tetrisState.getTop().length;
    Range[] ranges = new Range[columns + 1];
    for (int i = 0; i < columns; i++)
      ranges[i] = new Range(0.0, State.ROWS);
    ranges[columns] = new Range(0.0, State.N_PIECES);
    return ranges;
  }

  /** Total rows cleared so far in the current simulator state. */
  public int getRowsCleared() {
    return tetrisState.getRowsCleared();
  }
}