package com.robot.topology.core;

import java.util.List;

import com.robot.io.WriteInFile;
import com.robot.topology.graph.domain.Edge;
import com.robot.topology.graph.domain.Graph;
import com.robot.topology.graph.service.GraphManager;
import com.robot.topology.robot.domain.Robot;
import com.robot.topology.robot.service.RobotManagerImpl;
import com.robot.topology.robot.service.solo.RobotSoloManagerImpl;
import com.robot.topology.strategy.deterministic.BushMosteller;
import com.robot.topology.strategy.deterministic.Deterministic;
import com.robot.topology.strategy.deterministic.Incremental;
import com.robot.topology.strategy.deterministic.ShortestPathBM;
import com.robot.topology.strategy.deterministic.ShortestPathInc;

/**
 * Entry points for the deterministic graph-exploration algorithms.
 *
 * A single {@link Robot} repeatedly walks the graph from the initial vertex to
 * the goal vertex; after each successful walk the chosen {@link Deterministic}
 * strategy rewards the edges on the path taken and (for the BM/INC strategies)
 * penalizes the unused edges, until the {@link GraphManager} reports convergence.
 */
public class GeneralAlgorithm {

	public GeneralAlgorithm() {
	}

	/**
	 * Resolves a strategy keyword to its {@link Deterministic} implementation.
	 *
	 * @param strategy one of "BM", "INC", "SHORTINC", "SHORTBM"
	 * @return a fresh strategy instance
	 * @throws IllegalArgumentException if the keyword is not recognized
	 */
	private static Deterministic createStrategy(String strategy) {
		if ("BM".equals(strategy))
			return new BushMosteller();
		if ("INC".equals(strategy))
			return new Incremental();
		if ("SHORTINC".equals(strategy))
			return new ShortestPathInc();
		if ("SHORTBM".equals(strategy))
			return new ShortestPathBM();
		throw new IllegalArgumentException("Invalid Option!!");
	}

	/**
	 * Whether the strategy penalizes unused edges.
	 * The inversely proportional ("SHORT*") strategies do not penalize.
	 */
	private static boolean strategyPenalizes(String strategy) {
		return "BM".equals(strategy) || "INC".equals(strategy);
	}

	/**
	 * Base name of the CSV output file for the given strategy keyword.
	 *
	 * @throws IllegalArgumentException if the keyword is not recognized
	 */
	private static String outputFileName(String strategy) {
		if ("BM".equals(strategy))
			return "ValuesBushMosteller";
		if ("INC".equals(strategy))
			return "ValuesIncremental";
		if ("SHORTINC".equals(strategy))
			return "ValuesShortestPathInc";
		if ("SHORTBM".equals(strategy))
			return "ValuesShortestPathBM";
		throw new IllegalArgumentException("Invalid Option!!");
	}

	/**
	 * Runs the exploration loop until the graph's routing tables converge,
	 * logging edge weights to a CSV file after every iteration.
	 *
	 * @param graphToExplore the graph to explore (provides initial/goal vertices and edges)
	 * @param strategy       strategy keyword: "BM", "INC", "SHORTINC" or "SHORTBM"
	 * @throws IllegalArgumentException if {@code strategy} is not recognized
	 * @throws Exception                if the underlying managers or file writer fail
	 */
	public static void deterministicAlgorithm(Graph graphToExplore, String strategy) throws Exception {
		// Resolve the strategy once; the same keyword drives the strategy
		// instance, the output file name and the penalization flag.
		Deterministic chosenStrategy = createStrategy(strategy);
		boolean mustPenalize = strategyPenalizes(strategy);
		int iterationNumber = 0;

		// File writer: get an empty file for each run.
		// NOTE(review): WriteInFile is never closed here — confirm whether it
		// needs an explicit close()/flush after the loop.
		WriteInFile writeInformation = new WriteInFile("resources", outputFileName(strategy), "CSV");
		writeInformation.initializeFile();

		// Create the robot and the managers driving it through the graph.
		Robot theRobot = new Robot(graphToExplore.getInitialVertex(), graphToExplore.getGoalVertex(), graphToExplore.getEdges());
		GraphManager graphManager = new GraphManager(graphToExplore);
		// Create routine tables and assign them to the corresponding vertexes.
		graphManager.createRoutineTables(graphToExplore);
		RobotManagerImpl robotManager = new RobotSoloManagerImpl(theRobot);

		// Repeat full explorations until the graph has converged.
		do {
			iterationNumber++;

			// Put the robot back on the initial vertex and empty its path.
			theRobot.initializeRobot();

			// Walk one step at a time until the robot reaches the goal.
			do {
				// Refresh the routine tables with the updated edge weights.
				graphManager.updateRoutineTables(graphToExplore);
				robotManager.moveRobotOneStep(iterationNumber);
			} while (!theRobot.amIInGoalState());

			// Goal reached: reward each edge of the path the robot used.
			List<Edge> currentPath = theRobot.getCurrentPath().getPath();
			// Path length feeds the inversely proportional strategies.
			chosenStrategy.setPathLength(currentPath.size());

			for (Edge currentEdge : currentPath) {
				if (iterationNumber == 1)
					// First iteration: also log the initial (randomly chosen) weight.
					writeInformation.writeTextFile((iterationNumber - 1) + ":" + currentEdge.toStringWeight());

				chosenStrategy.updateEdgeToReward(currentEdge);
				System.out.println("I reward : " + currentEdge.toStringWeight());
				writeInformation.writeTextFile(iterationNumber + ":" + currentEdge.toStringWeight());
			}

			// Punish each unused edge. Only the BM/INC strategies actually
			// penalize; the weights are logged either way (original behavior).
			for (Edge currentEdge : theRobot.getUnusedEdges().getPath()) {
				if (iterationNumber == 1)
					// First iteration: also log the initial (randomly chosen) weight.
					writeInformation.writeTextFile((iterationNumber - 1) + ":" + currentEdge.toStringWeight());

				if (mustPenalize)
					chosenStrategy.updateEdgeToPunish(currentEdge);
				System.out.println("I punish : " + currentEdge.toStringWeight());
				writeInformation.writeTextFile(iterationNumber + ":" + currentEdge.toStringWeight());
			}

			// End loop: has the graph converged?
		} while (!graphManager.hasConverged());
		System.out.println("number of iteration : " + iterationNumber);
		// Display the routine table (DEBUG)
		//graphManager.displayRoutineTables(graphToExplore);
	}

	/**
	 * Runs a single exploration (one walk from start to goal) and applies one
	 * round of reward/punishment updates. No file output, no convergence loop.
	 *
	 * @param graphToExplore the graph to explore (provides initial/goal vertices and edges)
	 * @param strategy       strategy keyword: "BM", "INC", "SHORTINC" or "SHORTBM"
	 * @throws IllegalArgumentException if {@code strategy} is not recognized
	 * @throws Exception                if the underlying managers fail
	 */
	public static void deterministicAlgorithmOneRobot(Graph graphToExplore, String strategy) throws Exception {
		Deterministic chosenStrategy = createStrategy(strategy);
		boolean mustPenalize = strategyPenalizes(strategy);

		// Create the robot and the managers driving it through the graph.
		Robot theRobot = new Robot(graphToExplore.getInitialVertex(), graphToExplore.getGoalVertex(), graphToExplore.getEdges());
		GraphManager graphManager = new GraphManager(graphToExplore);
		// Create routine tables and assign them to the corresponding vertexes.
		graphManager.createRoutineTables(graphToExplore);
		RobotManagerImpl robotManager = new RobotSoloManagerImpl(theRobot);

		// Put the robot on the initial vertex with an empty path.
		theRobot.initializeRobot();

		// Walk one step at a time until the robot reaches the goal.
		// Iteration number 0: this entry point performs a single run.
		do {
			graphManager.updateRoutineTables(graphToExplore);
			robotManager.moveRobotOneStep(0);
		} while (!theRobot.amIInGoalState());

		// Goal reached: reward each edge of the path the robot used.
		List<Edge> currentPath = theRobot.getCurrentPath().getPath();
		// Path length feeds the inversely proportional strategies.
		chosenStrategy.setPathLength(currentPath.size());

		for (Edge currentEdge : currentPath) {
			chosenStrategy.updateEdgeToReward(currentEdge);
		}

		// For the strategies that penalize, punish each unused edge.
		if (mustPenalize) {
			for (Edge currentEdge : theRobot.getUnusedEdges().getPath()) {
				chosenStrategy.updateEdgeToPunish(currentEdge);
			}
		}
	}
}
