	import java.awt.geom.Point2D.Double;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Random;
import java.util.Vector;
	
	
/**
 * SARSA(lambda) agent with one feed-forward Q-network per action and
 * (replacing) eligibility traces.
 *
 * The environment alternates calls to {@link #getMove} (pick an action for the
 * current state) and {@link #putReward} (deliver the reward for the previous
 * transition and train the networks).
 */
public class ReinforceSarsaSpeed extends Agent implements Serializable {
	private static final long serialVersionUID = -4866906315919206043L;

	// --- learning parameters ---
	double discount = 0.8;              // discount factor (gamma)
	double lambda = 0.8;                // decay factor for eligibility traces
	final double minimal_e_trace = 0.1; // traces below this value are discarded

	double learnSpeed = 0.0001; // learning speed (alpha) of the neural network

	boolean debug = false;
	final boolean enable_e_traces = true;

	ThreeLayerNNet[] qs = new ThreeLayerNNet[4]; // one Q-value network per action

	double previous_q_value; // Q value for timestep t - 1
	double q_value;          // Q value for timestep t

	long runNr = 0; // number of timesteps we have been running

	double[] result = new double[1]; // reusable buffer holding the training target
	Random gen;                      // random number generator for exploration
	// Parallel vectors: e_traces.get(i) is the trace value for state e_traces_states.get(i).
	// java.lang.Double is spelled out because Point2D.Double is imported.
	Vector<java.lang.Double> e_traces = new Vector<java.lang.Double>();
	Vector<double[]> e_traces_states = new Vector<double[]>();

	// previous and current actions/inputs
	int current_action = 0;
	int previous_action = 0;
	double previous_reward;
	double[] previous_input;
	double[] flat_input;

	// action indices, so no confusion arises over array positions
	final int right = 0;
	final int left = 1;
	final int down = 2;
	final int up = 3;

	/**
	 * Creates a SARSA agent with one three-layer network per action.
	 *
	 * @param width         half-width of the agent's view window
	 * @param height        half-height of the agent's view window
	 * @param learnspeed    learning rate passed to each network
	 * @param hiddenNeurons number of hidden-layer neurons per network
	 */
	public ReinforceSarsaSpeed(int width, int height, double learnspeed, int hiddenNeurons) {
		super();
		exploreChance = 0.1; // exploration chance
		learnSpeed = learnspeed;

		// Input size: 2 extra inputs plus the flattened (2w+1) x (2h+1) view window.
		for (int i = 0; i < 4; i++) {
			qs[i] = new ThreeLayerNNet(2 + ((2 * width + 1) * (2 * height + 1)), hiddenNeurons, 1,
					learnSpeed, true, true, false); // load neural network
		}

		gen = new Random(); // initialize random number generator

		setExplorationChance(exploreChance);
	}

	/** Sets the learning rate and propagates it to all four networks. */
	public void setLearnspeed(double learnspeed) {
		this.learnSpeed = learnspeed;
		for (ThreeLayerNNet tmp : qs) tmp.setLearningSpeed(learnspeed);
	}

	/** Sets the discount factor (gamma). */
	public void setDiscount(double discount) {
		this.discount = discount;
	}

	/** Sets the eligibility-trace decay factor. */
	public void setLambda(double lambda) {
		this.lambda = lambda;
	}

	/**
	 * Requests a move from the agent. Shifts the current action/input/Q-value
	 * into the "previous" slots, then picks either a random (exploratory) action
	 * or the greedy action according to the four Q-networks.
	 *
	 * @param input the agent's view of the environment
	 * @param car   the car being controlled
	 * @return a unit step as a Point2D.Double (x = +/-1 or y = +/-1)
	 */
	public Double getMove(double[][] input, Car car) {

		runNr += 1;

		previous_action = current_action;
		previous_input = flat_input;
		previous_q_value = q_value; // store the previous q value

		flat_input = flatten_input(input, car); // for input to neural net
		Double action = new Double(); // starts at (0, 0); one coordinate is set below

		if (doExploratoryMove()) {

			if (gen.nextBoolean()) // x coordinate change
			{
				if (gen.nextBoolean()) {
					q_value = getQValue(0, 0, 0, 1, flat_input);
					action.x = 1;
					current_action = right;
				} else {
					q_value = getQValue(0, 0, 1, 0, flat_input);
					action.x = -1;
					current_action = left;
				}
			} else // y coordinate change
			{
				if (gen.nextBoolean()) {
					q_value = getQValue(0, 1, 0, 0, flat_input);
					action.y = -1;
					current_action = down;
				} else {
					q_value = getQValue(1, 0, 0, 0, flat_input);
					action.y = 1;
					current_action = up;
				}
			}
		} else { // on-policy (greedy) move: pick the action with the highest Q value
			double[] actionQValues = getActionQValues(flat_input);

			if (threeCompare(actionQValues[right], actionQValues[left], actionQValues[down], actionQValues[up])) {
				action.x = 1;
				q_value = actionQValues[right];
				current_action = right;
			}
			else if (threeCompare(actionQValues[left], actionQValues[right], actionQValues[down], actionQValues[up])) {
				action.x = -1;
				q_value = actionQValues[left];
				current_action = left;
			}
			else if (threeCompare(actionQValues[down], actionQValues[left], actionQValues[right], actionQValues[up])) {
				action.y = -1;
				q_value = actionQValues[down];
				current_action = down;
			}
			else {
				// Ties fall through to "up" by construction of the comparisons above.
				action.y = 1;
				q_value = actionQValues[up];
				current_action = up;
			}
		}

		return action;
	}

	/**
	 * @return true iff {@code largest} is strictly greater than all three other values
	 */
	boolean threeCompare(double largest, double value1, double value2, double value3)
	{
		return (largest > value1 && largest > value2 && largest > value3);
	}

	/**
	 * Evaluates all four networks on the given flattened input.
	 *
	 * @return an array of Q values indexed by {@code right}/{@code left}/{@code down}/{@code up}
	 */
	double[] getActionQValues(double[] flat_input)
	{
		double[] result = new double[4];
		result[right] = getQValue(0, 0, 0, 1, flat_input);
		result[left] = getQValue(0, 0, 1, 0, flat_input);
		result[down] = getQValue(0, 1, 0, 0, flat_input);
		result[up] = getQValue(1, 0, 0, 0, flat_input);

		return result;
	}

	/** Serializes the four Q-networks to {@code fileName}. */
	public void save(String fileName) {
		try {
			FileOutputStream fileStream = new FileOutputStream(fileName);
			ObjectOutputStream objectStream = new ObjectOutputStream(fileStream);
			objectStream.writeObject(qs);
			objectStream.close();
			System.out.println("Saved Agent " + fileName);
		} catch (Exception excp) {
			excp.printStackTrace();
		}
	}

	/**
	 * Restores the four Q-networks from {@code fileName}.
	 * On failure the current networks are kept unchanged.
	 */
	public void restore(String fileName) {
		ThreeLayerNNet[] loaded = null;
		try {
			FileInputStream fileStream = new FileInputStream(fileName);
			ObjectInputStream objectStream = new ObjectInputStream(fileStream);
			loaded = (ThreeLayerNNet[]) objectStream.readObject();
			objectStream.close(); // close the stream (was previously leaked)
		} catch (Exception excp) {
			excp.printStackTrace();
		}
		// Only replace the networks (and report success) if loading actually worked;
		// the old code set qs = null on failure, causing a later NullPointerException.
		if (loaded != null) {
			qs = loaded;
			System.out.println("Restored Agent " + fileName);
		}
	}

	/**
	 * Requests the Q value for one specific action. Exactly one of the four
	 * action flags should be positive; it selects which network is evaluated.
	 */
	double getQValue(double up, double down, double left, double right, double[] flat_input) {

		if (up > 0)
			return qs[this.up].propagateForward(flat_input)[0];
		else if (down > 0)
			return qs[this.down].propagateForward(flat_input)[0];
		else if (left > 0)
			return qs[this.left].propagateForward(flat_input)[0];
		else
			return qs[this.right].propagateForward(flat_input)[0];
	}

	/** Prints a debug string if {@code debug} is true. */
	void debugOut(String line) {
		if (debug) {
			System.out.println(line);
		}
	}

	/**
	 * Called by the environment to deliver the reward for the previous step.
	 * Trains the network of the previous action on every traced state, then
	 * decays the eligibility traces.
	 *
	 * @param aReward the reward observed for the previous transition
	 */
	public void putReward(double aReward) {

		if (runNr > 1)
		{
			// add the previous state to the eligibility traces
			addTrace(previous_input);

			for (int i = 0; i < e_traces.size(); i++)
			{
				// NOTE(review): the discount factor only enters via the trace decay
				// (discount * lambda) below, not in this target directly — confirm
				// this matches the intended SARSA(lambda) update.
				result[0] = previous_reward + (e_traces.get(i) * q_value); // desired q value

				qs[previous_action].propagateForwardAndBack(e_traces_states.get(i), result); // train network
			}
		}

		if (enable_e_traces)
		{
			// decay traces
			for (int i = 0; i < e_traces.size(); i++)
			{
				e_traces.set(i, (e_traces.get(i) * discount * lambda));
			}

			// Delete traces with a low value. Iterate backwards: a forward loop
			// with remove(i) skips the element that shifts into the removed slot.
			for (int i = e_traces.size() - 1; i >= 0; i--)
			{
				if (e_traces.get(i) < minimal_e_trace)
				{
					e_traces.remove(i);
					e_traces_states.remove(i);
				}
			}
		}
		else
		{
			e_traces.clear();
			e_traces_states.clear(); // keep the parallel vectors in sync
		}

		previous_reward = aReward; // store reward just received for next time
	}

	/**
	 * Adds {@code state} to the eligibility traces with value 1.0 (replacing
	 * traces: an existing entry for the same state is reset rather than summed).
	 */
	private void addTrace(double[] state)
	{
		// look up the state; if already traced, reset its value
		for (int i = 0; i < e_traces.size(); i++)
		{
			if (Arrays.equals(e_traces_states.get(i), state))
			{
				e_traces.set(i, 1.0); // replacing traces
				return;
			}
		}

		// add a new state to the traces
		e_traces.add(1.0);
		e_traces_states.add(state);
	}

}

	

