package cbbx_sm.decision_maker.search;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

import cbbx_sm.decision_maker.search.States.NNProperty;
import cbbx_sm.probabilistic_model.Cluster;
import cbbx_sm.utils.ExperimentManager;



public class DynamicProgrammingLookahead implements Serializable{

  //1 hour
  //public static long TIMEOUT = 1000*60*60; 

  public static boolean debug = true;
  // NOTE(review): set true whenever an iteration of populateScoreArray finishes
  // without triggering a grid-refinement restart — confirm intended semantics.
  public static boolean lastCallPrematureTermination = false;

  private final WorldStateTransition worldStateTransition;
  private final States states;
  private final int numberOfTimeStampsLookAhead;
  private final boolean discounting;
  /** Accuracy threshold on the ceiling-floor score gap (the "error" ctor arg). */
  private final double epsilon;
  private final double zoomEventScoreFactor;
  private final double upEventScoreFactor;
  /** Converged value table: one entry per state id, holding scores and best actions. */
  private final DynamicProgrammingTableEntry[] lookaheadTable;

  /**
   * Builds the lookahead policy by running the dynamic program to completion.
   *
   * @param worldStateTransition transition model over world states
   * @param numberOfTimeStampsLookAhead horizon N of the dynamic program
   * @param discounting if true, scores at time k are scaled by 0.5^k
   * @param error accuracy threshold (epsilon) on the ceiling-floor score gap
   * @param numberOfStates upper bound on grid size; refinement stops beyond it
   * @param zoomEventScoreFactor reward factor applied to zoom events
   * @param upEventScoreFactor reward factor applied to up events
   */
  public DynamicProgrammingLookahead(WorldStateTransition worldStateTransition,
		  int numberOfTimeStampsLookAhead, boolean discounting,
			double error, int numberOfStates, double zoomEventScoreFactor, double upEventScoreFactor) {
	  this.worldStateTransition = worldStateTransition;
	  this.states = new States(worldStateTransition, ExperimentManager.usePreComputedGrid);
	  this.numberOfTimeStampsLookAhead = numberOfTimeStampsLookAhead;
	  this.discounting = discounting;
	  // BUG FIX: epsilon was previously assigned numberOfStates; it is the error
	  // tolerance (see its use in toString and the populateScoreArray restart check).
	  this.epsilon = error;
	  this.zoomEventScoreFactor = zoomEventScoreFactor;
	  this.upEventScoreFactor = upEventScoreFactor;
	  this.lookaheadTable =
		  populateScoreArray(
				  worldStateTransition, states, numberOfTimeStampsLookAhead, discounting,
				  error, numberOfStates, zoomEventScoreFactor, upEventScoreFactor);
	  
  }
  
	/**
	 * Runs backward-induction value iteration over the state grid and returns the
	 * converged table of per-state scores and best actions.
	 *
	 * The conceptual table ScoreArray[k][s] (max achievable score of being in
	 * state s at time k) is stored as a rolling 2-row array: row 0 holds the
	 * values being computed for time k, row 1 holds the values of time k+1.
	 *
	 * If, after an iteration, the ceiling-floor gap of some state exceeds
	 * {@code error} and the grid is still below {@code numberOfStates}, the grid
	 * is refined via {@code states.addTop()} and the whole computation restarts.
	 *
	 * @param worldStateTransition transition model over world states
	 * @param states the (mutable) state grid; may be refined during the run
	 * @param N horizon — the "life expectancy" of the world in time stamps
	 * @param discounting if true, scale scores at time k by 0.5^k
	 * @param error maximal tolerated ceiling-floor score gap per state
	 * @param numberOfStates grid-size cap for refinement
	 * @param zoomEventScoreFactor reward factor for zoom events
	 * @param upEventScoreFactor reward factor for up events
	 * @return the final row of the table (one entry per state id)
	 */
	public static DynamicProgrammingTableEntry[] populateScoreArray(
			WorldStateTransition worldStateTransition, States states, int N, boolean discounting,
			double error, int numberOfStates, double zoomEventScoreFactor, double upEventScoreFactor){
		System.out.println(states.size());
		long startTime = System.currentTimeMillis();
		lastCallPrematureTermination = false;
			
		/** 
		 * ScoreArray[k][s] represents the maximal achievable score of being in state s when the
		 * current time is k.
		 * 
		 * N is the "life expectancy" of the world, or the look ahead # seconds (Horizon). 
		 * 
		 */
		DynamicProgrammingTableEntry[][] scoreArray = initializeArray(states);
		
		
		/** Populate the score array with the best utility values. */
		int iterations = 1;
		for (int k=N-1; k>=0; k--){
			for (double[] s:states){
				int s_id = states.get(s);
	
				/**
				 * Compute the maximal expected score of being in state s at time k.
				 * For all possible actions compute the maximal expected score. 
				 * */
				DynamicProgrammingTableEntry bestScoreEntry = new DynamicProgrammingTableEntry();
				scoreArray[0][s_id].originalState = new SchedulerState(s).toString();
				scoreArray[0][s_id].actionToScore = new HashMap<String, Double>();
				bestScoreEntry.scoreFloor = 0;
				bestScoreEntry.scoreCeiling = 0;
				// Explicitly zeroed for uniformity with the other score fields
				// (behavior-identical: double fields default to 0.0).
				bestScoreEntry.scoreNN = 0;
				bestScoreEntry.scoreInterpulated = 0;
				
				for (String action: worldStateTransition.getSchedules()){
					DynamicProgrammingTableEntry currentScoreEntry = new DynamicProgrammingTableEntry();
	
					// Expected score of taking `action` in state s, under each
					// approximation technique (floor/ceiling/nearest-neighbor).
					currentScoreEntry.scoreFloor = actionExpectedScore(worldStateTransition, scoreArray, s, action, states, NNProperty.floor, zoomEventScoreFactor, upEventScoreFactor, true);
					currentScoreEntry.scoreCeiling = actionExpectedScore(worldStateTransition, scoreArray, s, action, states, NNProperty.ceiling, zoomEventScoreFactor, upEventScoreFactor, true);
					currentScoreEntry.scoreNN = actionExpectedScore(worldStateTransition, scoreArray, s, action, states, NNProperty.nn, zoomEventScoreFactor, upEventScoreFactor, true);
					currentScoreEntry.scoreInterpulated = 0.5*currentScoreEntry.scoreFloor+0.5*currentScoreEntry.scoreCeiling;
	
					scoreArray[0][s_id].actionToScore.put(action, currentScoreEntry.scoreNN);
					
					bestScoreEntry.scoreFloor = Math.max(bestScoreEntry.scoreFloor, currentScoreEntry.scoreFloor);
					bestScoreEntry.scoreCeiling = Math.max(bestScoreEntry.scoreCeiling, currentScoreEntry.scoreCeiling);
					bestScoreEntry.scoreNN = Math.max(bestScoreEntry.scoreNN, currentScoreEntry.scoreNN);
					bestScoreEntry.scoreInterpulated = Math.max(bestScoreEntry.scoreInterpulated, currentScoreEntry.scoreInterpulated);
	
					/** Save the best action to take for time k. */
					if (bestScoreEntry.scoreFloor == currentScoreEntry.scoreFloor){
						scoreArray[0][s_id].bestActionFloor = action;
					}
					if (bestScoreEntry.scoreCeiling == currentScoreEntry.scoreCeiling){
						scoreArray[0][s_id].bestActionCeiling = action;
					}
					if (bestScoreEntry.scoreNN == currentScoreEntry.scoreNN){
						scoreArray[0][s_id].bestActionNN = action;
					}
					if (bestScoreEntry.scoreInterpulated == currentScoreEntry.scoreInterpulated){
						scoreArray[0][s_id].bestActionInterpulated = action;
					}					
					
				}
				
	
				/** Put in scoreArray[k][s] the value max expected scoring action. */
				if (discounting){
					bestScoreEntry.scoreFloor *=Math.pow(0.5, k);
					bestScoreEntry.scoreCeiling *=Math.pow(0.5, k);
					bestScoreEntry.scoreNN *=Math.pow(0.5, k);
					bestScoreEntry.scoreInterpulated *=Math.pow(0.5, k);
				}
				scoreArray[0][s_id].scoreFloor = bestScoreEntry.scoreFloor;
				scoreArray[0][s_id].scoreCeiling = bestScoreEntry.scoreCeiling;
				scoreArray[0][s_id].scoreNN = bestScoreEntry.scoreNN;
				scoreArray[0][s_id].scoreInterpulated = bestScoreEntry.scoreInterpulated;
			}
			
			/** Prepare for the next iteration: copy row 0 (time k) into row 1
			 * (time k+1 for the next, earlier, k) and track the largest
			 * ceiling-floor gap.
			 */
			boolean errorExceeded = false;
			double errorDifference = 0;
			for (int s=0; s<states.size(); s++){
				scoreArray[1][s] = new DynamicProgrammingTableEntry(scoreArray[0][s]);
				double currentDifference = scoreArray[1][s].scoreCeiling-scoreArray[1][s].scoreFloor;
				if (currentDifference > error) {
					errorExceeded = true;
					errorDifference = currentDifference;
				}
			}	
			/**
			 *  See if the threshold for the accuracy was violated, if so add states and restart. */

			// If we are about to return and there are errors greater than delta, add states to the grid.
			if (errorExceeded
			    && (states.size() < numberOfStates)) {
				System.out.println("Restarting due to epsilon = "+error+" and current difference: "+errorDifference+" Iterations = "+(iterations++));
				states.addTop();
				scoreArray = initializeArray(states);
				System.out.println("States size is now: " + states.size());
				k=N; // to be subtracted right away in the for statement.
			 } else {
			    lastCallPrematureTermination = true;
			 }
		}
		
	//		System.out.println("Number of times referenced");
	//		for (double[] s:states){
	//			int s_id = states.get(s);
	//			String motion = "";
	//			for (int j=0; j<Schedules.getSchedules().length-1; j++){
	//				motion+=s[j]+":";
	//			}
	//			System.out.println(motion+"\t"+numberOfTimesReferenced[s_id]);
	//		}
		
		return scoreArray[0];
	
	}

  /**
   * Computes the best action to take based on the current state and the pre-computed 
   * action->expected score map.
   *
   * When {@code ExperimentManager.directActionLookup} is set, this is a plain
   * table lookup; otherwise a one-step expectation over transition outcomes is
   * computed against the lookahead table.
   *
   * @param schedulerState map from cluster to its probability in the next timestamp
   * @param recordMisses whether grid-approximation misses should be recorded
   * @return the best action to take
   */
public String getBestAction(Map<Cluster, Double> schedulerState, boolean recordMisses) {
	    NNProperty approximationTechnique = ExperimentManager.approximationTechnique;
		double[] state = states.get(schedulerState);
		Integer sId = states.get(state);
		if (sId == null) {
			// State is off-grid: snap to its grid approximation.
			sId = states.get(states.approximateProbabilities(state, approximationTechnique, recordMisses));
		}
		
		// If we are interested in a simple table lookup or computing the best action utility.
		if (ExperimentManager.directActionLookup) { 
		  return getAction(lookaheadTable[sId], ExperimentManager.scoreUsed);
		}
		 
		double[] nextEvent = states.get(schedulerState);

		String bestAction = Schedules.UP;
		double bestActionScore = 0;
		for (String action: worldStateTransition.getSchedules()) {
			double currentActionScore = 0;
			if (action.compareTo(Schedules.UP) != 0) {
				// If action is to zoom in, take into account both outcomes (motion/nomotion).
				int actionIndex = worldStateTransition.scheduleToCameraId(action);
				double probability = nextEvent[actionIndex];
				nextEvent[actionIndex] = Event.p1;
				currentActionScore += 
						probability * 
						(zoomEventScoreFactor + 
								getScore(lookaheadTable[states.get(states.approximateProbabilities(nextEvent, approximationTechnique, recordMisses))], ExperimentManager.scoreUsed));
				nextEvent[actionIndex] = Event.p0;
				currentActionScore += (1 - probability) * 
					getScore(lookaheadTable[states.get(states.approximateProbabilities(nextEvent, approximationTechnique, recordMisses))], ExperimentManager.scoreUsed);
				// Restore the entry mutated above before evaluating the next action.
				nextEvent[actionIndex] = probability;
			} else {
				// Take into account all state transition outcomes.
				for (double[] s_next: 
					getNextStates(worldStateTransition, nextEvent,Schedules.UP)){
					double prob = 1;
					int numberOfLowResEvents = 0;
					for (int i = 0; i < s_next.length; i++) {						
						if (s_next[i] == Event.p1) {
							prob *= nextEvent[i];	
							numberOfLowResEvents ++;
						} else { //s_next[i] == Event.p0
							prob *= 1 - nextEvent[i];
						}
					}
					double upLookaheadGlobalReward = 0; //Up bonus.
					currentActionScore += 
							prob * (
									(upLookaheadGlobalReward * numberOfLowResEvents) // Up lookahead network reward
									+ (upEventScoreFactor * numberOfLowResEvents) // Up reward
									+ getScore(lookaheadTable[states.get(states.approximateProbabilities(s_next, approximationTechnique, recordMisses))], ExperimentManager.scoreUsed) // Local Lookahead reward
									);
				}
			}
			if (currentActionScore > bestActionScore) {
				bestActionScore = currentActionScore;
				bestAction = action;
			}
		}
		return bestAction;
	  }

	/**
	 * Returns the best action recorded in {@code entry} for the requested
	 * score type, or null for an unhandled score type.
	 */
	private String getAction(
		DynamicProgrammingTableEntry entry,
		NNProperty scoreUsed) {
		// BUG FIX: the original switch had no break statements, so every case
		// fell through and the method always returned bestActionNN regardless
		// of scoreUsed. Per-case returns mirror the sibling getScore().
		switch (scoreUsed) {
		case ceiling:
			return entry.bestActionCeiling;
		case floor:
			return entry.bestActionFloor;
		case nn:
			return entry.bestActionNN;
		}
		return null;
	}

	/**
	 * Returns the score recorded in {@code entry} for the requested score
	 * type, or null for an unhandled score type.
	 */
	private Double getScore(
			DynamicProgrammingTableEntry entry,
			NNProperty scoreUsed) {
			switch (scoreUsed) {
			case ceiling:
				return entry.scoreCeiling;
			case floor:
				return entry.scoreFloor;
			case nn:
				return entry.scoreNN;
			}
			return null;
		}

	/**
	 * Allocates the rolling 2-row score table (rows: current time k and k+1)
	 * with all scores zeroed — no utility remains when the horizon is reached.
	 */
	private static DynamicProgrammingTableEntry[][] initializeArray(States states) {
		DynamicProgrammingTableEntry[][] scoreArray = new DynamicProgrammingTableEntry[2][states.size()];
		for (double[] s:states){
			for (int k=0; k<2; k++){
				int s_id = states.get(s);
				scoreArray[k][s_id] = new DynamicProgrammingTableEntry();
				/**
				 * No utility when the end of time has arrived.
				 * */
				scoreArray[k][s_id].scoreCeiling = 0;	
				scoreArray[k][s_id].scoreFloor = 0;	
				scoreArray[k][s_id].scoreInterpulated = 0;
				scoreArray[k][s_id].scoreNN = 0;
			}
		}
		return scoreArray; 
	}

	/**
	 * Computes the expected score of taking {@code action} in state {@code s}:
	 * the transition-probability-weighted sum, over all successor states, of the
	 * immediate utility plus the already-computed next-time-step score
	 * (row 1 of {@code scoreArray}), under the given approximation technique.
	 */
	private static double actionExpectedScore(
			WorldStateTransition worldStateTransition, DynamicProgrammingTableEntry[][] scoreArray, double[] s,
			String action, States states, NNProperty nNProperty, double zoomEventScoreFactor, double upEventScoreFactor, boolean log) {
		double actionExpectedScore = 0;
		for (double[] s_next: getNextStates(worldStateTransition, s,action)){
			double nextStateUtility =
					ScheduleConfigurationNode.getUtilityForStateAndSchedule(
							action, s_next, worldStateTransition, zoomEventScoreFactor, upEventScoreFactor);
			// Successor may lie off the grid; snap it to a grid state first.
			double[] s_next_approx = approximateEvent(states, s_next, nNProperty, log);

			int s_next_id = states.get(s_next_approx);
			double s_next_expectedScore = 0;
			switch (nNProperty) {
			case floor:
				s_next_expectedScore = scoreArray[1][s_next_id].scoreFloor;
				break;
			case ceiling:
				s_next_expectedScore = scoreArray[1][s_next_id].scoreCeiling;
				break;
			case nn:
				s_next_expectedScore = scoreArray[1][s_next_id].scoreNN;
				break;
			}
			actionExpectedScore +=  (nextStateUtility + s_next_expectedScore)*worldStateTransition.getTransitionProbability(s, s_next, action);
		}
		return actionExpectedScore;
	}

	/** Generate the events that the scheduler can arrive to from a given state and action.
	 * 
	 * Generate the next level of the world, set the probabilities to 0/1 based scheduler decision. 
	 * Make the probabilities reflect what we assumed the scheduler had seen. 
	 * 1) if up, then only those with motion/nomotion
	 * 2) otherwise, two events only 1/0 and roll forward the rest of the events.
	 * @param s the current state
	 * @param u the action that is taking place from the state s.
	 * @return the list of reachable successor event vectors
	 */
	public static ArrayList<double[]> getNextStates(
			WorldStateTransition worldStateTransition, double[] s, String u) {
		ScheduleConfigurationNode currentNode = new ScheduleConfigurationNode(null, s, null, worldStateTransition, 0d,1,0);
		HashSet<ScheduleConfigurationNode> nextStates = currentNode.getNextNodesForSchedule(u);
		
		ArrayList<double[]> result = new ArrayList<double[]>();
		for (ScheduleConfigurationNode nextNode: nextStates){
			result.add(nextNode.CiEvent);
		}
		
		return result;
	}

	/**
	 * Returns the event unchanged when it already lies on the grid; otherwise
	 * returns its grid approximation under the given technique.
	 */
	private static double[] approximateEvent(States states,
			double[] newEventsWithoutApproximation, NNProperty nNProperty,
			boolean log) {
		if (states.get(newEventsWithoutApproximation) == null) {
			return states.approximateProbabilities(newEventsWithoutApproximation, nNProperty, log);
		} else{
			return newEventsWithoutApproximation;				
		}
	}

	@Override
	public String toString() {
		// NOTE(review): the leading "[" is missing before "]" — left as-is in
		// case the exact string is consumed downstream (e.g. as a label).
		return "DynamicProgrammingLookahead_discounting=" + discounting
				+ "_epsilon=" + epsilon + "_numberOfTimeStampsLookAhead="
				+ numberOfTimeStampsLookAhead + "_upEventScoreFactor="
				+ upEventScoreFactor + ", worldStateTransition="
				+ worldStateTransition + ", zoomEventScoreFactor="
				+ zoomEventScoreFactor + "]";
	}

	/** @return the (possibly refined) state grid used by the dynamic program */
	public States getStates() {
		return this.states;
	}

	/** @return the converged per-state lookahead table */
	public DynamicProgrammingTableEntry[] getTable() {
		return this.lookaheadTable;
	}

	/** @return the transition model this policy was built from */
	public WorldStateTransition getWorldStateTransition() {
		return this.worldStateTransition;
	}
}
