package template;

import java.util.ArrayList;
import java.util.List;

import logist.agent.Agent;
import logist.behavior.ReactiveBehavior;
import logist.plan.Action;
import logist.plan.Action.Move;
import logist.plan.Action.Pickup;
import logist.simulation.Vehicle;
import logist.task.Task;
import logist.task.TaskDistribution;
import logist.topology.Topology;
import logist.topology.Topology.City;

/**
 * Reactive pickup-and-delivery agent. In {@link #setup} it enumerates the
 * state space (current city x pending-task destination, with {@code null}
 * meaning "no task offered"), runs value iteration to convergence, and
 * derives a greedy policy; {@link #act} then simply looks the policy up.
 */
public class ReactiveAgent implements ReactiveBehavior {

	/** Fallback discount factor when the agent configuration defines none. */
	private static final double DEFAULT_DISCOUNT_FACTOR = 0.95;
	/** Fallback convergence threshold for value iteration. */
	private static final double DEFAULT_EPSILON = 0.001;

	// One State per (city, packetTo) pair; packetTo == null encodes "no task".
	private List<State> states;

	@Override
	public void setup(Topology topology, TaskDistribution td, Agent agent) {
		// Build the state space, learn the values, then fix the policy.
		createStates(topology);
		computeStateValues(topology, td, agent);
		choosePolicy(topology, td, agent);

		// Print the learned policy for inspection.
		for (State s : states) {
			System.out.println(s);
		}
	}

	@Override
	public Action act(Vehicle vehicle, Task availableTask) {
		City currentCity = vehicle.getCurrentCity();

		// Map the observation onto a state: packetTo == null when no task is offered.
		City packetTo = (availableTask == null) ? null : availableTask.deliveryCity;
		State currentState = getState(currentCity, packetTo);

		// Follow the precomputed greedy policy for this state.
		BestAction bestAction = currentState.getBestAction();
		Action action = bestAction.isPickUp()
				? new Pickup(availableTask)
				: new Move(bestAction.getCity());

		// Trace each decision along the road.
		System.out.println(currentState + ". Action: " + action.toLongString());
		return action;
	}

	/**
	 * Enumerates all states: for every city, one "no task" state plus one state
	 * per possible task destination. States with city == packetTo are included;
	 * their probability mass is expected to be zero in the task distribution.
	 */
	private void createStates(Topology topology) {
		states = new ArrayList<State>();
		for (City city : topology) {
			// State with no task available in this city.
			states.add(new State(city, null, 0.0, null));
			// One state per possible task destination.
			for (City packetTo : topology) {
				states.add(new State(city, packetTo, 0, null));
			}
		}
	}

	/**
	 * Value iteration: repeatedly recomputes V(s) = max_a Q(s, a) for every
	 * state until the largest relative change drops below epsilon.
	 */
	private void computeStateValues(Topology topology, TaskDistribution td, Agent agent) {
		double discountFactor =
				agent.readProperty("discount-factor", Double.class, DEFAULT_DISCOUNT_FACTOR);
		double epsilon = agent.readProperty("epsilon", Double.class, DEFAULT_EPSILON);

		do {
			for (State state : states) {
				// Recompute the value from scratch each sweep (max over actions);
				// a running max against the previous sweep's value would never
				// let values decrease and would leave getLastValue() stale.
				double bestValue = Double.NEGATIVE_INFINITY;

				// Action: pick the task up (only when one is available).
				if (state.getPacketTo() != null) {
					bestValue = Math.max(bestValue, qValue(state, new BestAction(null),
							state.getPacketTo(), topology, td, agent, discountFactor));
				}

				// Action: move empty to one of the neighbouring cities.
				for (City nextCity : state.getCity()) {
					bestValue = Math.max(bestValue, qValue(state, new BestAction(nextCity),
							nextCity, topology, td, agent, discountFactor));
				}

				// NOTE(review): assumes State.setValue records the previous value
				// so getLastValue() is meaningful in goodEnough() — confirm.
				state.setValue(bestValue);
			}
			// BUGFIX: iterate UNTIL converged; the original looped WHILE converged
			// and therefore stopped after a single sweep.
		} while (!goodEnough(epsilon));
	}

	/**
	 * Greedy policy extraction: for each state, stores the action maximizing
	 * Q(s, a) under the same discount factor used for value iteration.
	 */
	private void choosePolicy(Topology topology, TaskDistribution td, Agent agent) {
		// Must match computeStateValues, otherwise the policy is extracted
		// from a different MDP than the one whose values were computed.
		double discountFactor =
				agent.readProperty("discount-factor", Double.class, DEFAULT_DISCOUNT_FACTOR);

		for (State state : states) {
			double bestValue = Double.NEGATIVE_INFINITY;

			// Action: pick the task up (only when one is available).
			if (state.getPacketTo() != null) {
				double q = qValue(state, new BestAction(null), state.getPacketTo(),
						topology, td, agent, discountFactor);
				if (q > bestValue) {
					bestValue = q;
					state.setBestAction(new BestAction(null)); // null city == pickup
				}
			}

			// Action: move empty to one of the neighbouring cities.
			for (City nextCity : state.getCity()) {
				double q = qValue(state, new BestAction(nextCity), nextCity,
						topology, td, agent, discountFactor);
				if (q > bestValue) {
					bestValue = q;
					state.setBestAction(new BestAction(nextCity));
				}
			}
		}
	}

	// *******************HELPER FUNCTIONS*********************************

	/**
	 * Q(s, a) = R(s, a) + gamma * sum_{s'} P(s') * V(s'). The successor city is
	 * deterministic given the action; only the task found on arrival is random.
	 */
	private double qValue(State state, BestAction action, City nextCity, Topology topology,
			TaskDistribution td, Agent agent, double discountFactor) {
		return reward(state, action, td, agent)
				+ discountFactor * computeTotalWeightedSumOfFV(topology, td, nextCity);
	}

	/**
	 * Returns true when every state's relative value change since the previous
	 * sweep is at most epsilon.
	 */
	private boolean goodEnough(double epsilon) {
		for (State state : states) {
			double current = state.getValue();
			double previous = state.getLastValue();

			double scale = Math.max(Math.abs(current), Math.abs(previous));
			if (scale == 0) {
				// Only possible when both values are zero; avoid dividing by zero.
				scale = 0.00000001;
			}
			if (Math.abs(current - previous) / scale > epsilon) {
				return false;
			}
		}
		return true;
	}

	/**
	 * Looks up the state for (city, packetTo); packetTo == null means "no task".
	 * Reference comparison is used — assumes logist hands out canonical City
	 * instances per topology (TODO confirm).
	 */
	private State getState(City city, City packetTo) {
		for (State state : states) {
			if (state.getCity() == city && state.getPacketTo() == packetTo) {
				return state;
			}
		}
		return null;
	}

	/**
	 * Immediate reward R(s, a): task payment minus travel cost for a pickup,
	 * pure (negative) travel cost for an empty move. Uses the cost-per-km of
	 * the agent's first vehicle.
	 */
	private double reward(State state, BestAction action, TaskDistribution td, Agent agent) {
		double costPerKm = agent.vehicles().get(0).costPerKm();

		if (action.isPickUp()) {
			City deliveryFrom = state.getCity();
			City deliveryTo = state.getPacketTo();
			double gain = td.reward(deliveryFrom, deliveryTo);
			double cost = deliveryFrom.distanceTo(deliveryTo) * costPerKm;
			return gain - cost;
		}
		// Empty move: only the travel cost is incurred.
		return -state.getCity().distanceTo(action.getCity()) * costPerKm;
	}

	/**
	 * Expected value of arriving in nextCity: sum over all possible tasks
	 * (including "no task") of P(task | nextCity) * V(nextCity, task).
	 * The city reached is certain; only the task found there is stochastic.
	 */
	private double computeTotalWeightedSumOfFV(Topology topology, TaskDistribution td,
			City nextCity) {
		double expectedValue = 0.0;

		for (City packetTo : topology) {
			expectedValue += td.probability(nextCity, packetTo)
					* getState(nextCity, packetTo).getValue();
		}
		// Probability that no task at all is available in nextCity.
		expectedValue += td.probability(nextCity, null) * getState(nextCity, null).getValue();

		return expectedValue;
	}
}