package experiments;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Scanner;

import players.*;

import model.Field;
import model.OthelloBoard;
import neuralnetworks.MultiLayerPerceptron;
import neuralnetworks.OutputFunction.Type;

/**
 * Drives a batch of Othello TD-learning experiments described by ".in"
 * configuration files.
 *
 * Defaults are read from experiments/default.in, overridden by the file given
 * on the command line, and each experiment is repeated {@code repeats} times.
 * During a run, one or more {@link MultiLayerPerceptron} utility functions
 * (one per phase of the game, separately for white and black unless
 * {@code use_single_utility}) are trained by self-play and periodically
 * evaluated against a list of fixed test players.
 */
public class ExperimentRunner 
{
	boolean verbose;               // print progress messages to stdout
	
	int repeats;                   // number of independent repetitions per experiment
	
	MultiLayerPerceptron[] utilityFunctions_white; //one function for each episode of a game
	MultiLayerPerceptron[] utilityFunctions_black; //one function for each episode of a game
	int networks;                  // length of utility arrays
	boolean use_single_utility;    // will make utilities for white and black the same
	double exploration_prob;
	String continue_run = null;    // train using an older trained network as starting point
	
	int[] setup;                   // layer sizes used to build new perceptrons
	double alpha, gamma, lambda;   // learning rate, discounting, eligibility trace
	double tau;                    // decay of learning_rate
	
	int initial_moves;             // nr of random initial moves (per player per game)
	Player initializer = new RandomPlayer();
	
	int epoch;                     // current training game; a field so getTau() can read it
	
	String train_player_1;
	String train_player_2;
	
	int training_games;            // total nr of games to train on
	int batch_size;                // apply collected weight updates every ... games
	
	String[] test_players;

	int testing_games;             // nr of games to test on
	int test_interval;             // test after seen ... nr of games
	
	boolean symmetry_removal;      // passed to the board; NOTE(review): the original comment
	                               // claimed symmetries are NOT removed when this is true —
	                               // verify against OthelloBoard.setSymmetryRemoval
	int input_representation;      // determines if we expect 64, 128 or 192 inputs
	
	/**
	 * Reads the configuration and runs every repetition of the experiment.
	 *
	 * @param inputFile path of a ".in" configuration file; all output file
	 *                  names are derived from this name
	 * @throws IOException if a configuration or result file cannot be accessed
	 */
	public ExperimentRunner(String inputFile) throws IOException
	{
		// output files share the input file's name without the ".in" suffix
		String fileName = inputFile.substring(0, inputFile.lastIndexOf(".in"));
		
		// collect input.
		// networks, players and board are not yet initialized
		readExperiment("../src/experiments/default.in");
		if(verbose) System.out.println("\nsetting experiment values . . .\n");
		readExperiment(inputFile);  //overwrite defaults with file
		
		// everything is set up in the experiment
		for(int repeat = 0; repeat < repeats; repeat++)
			runExperiment(fileName, repeat);
	}
	
	/**
	 * Trains the networks for one repetition, testing and logging at regular
	 * intervals. Results go to "fileName.repeat.out" and the trained networks
	 * to "fileName_repeat.m". A ".temp" marker file lets concurrent or
	 * restarted processes skip experiments that are busy or finished.
	 */
	private void runExperiment(String fileName, int repeat) throws IOException
	{
		// Create new file names to write results to
		String outputFile = fileName + "." + repeat + ".out"; 
		String networkFile = fileName + "_" + repeat + ".m";
		
		File outFile = new File(outputFile);
		File tempFile = new File(outputFile + ".temp"); 
		
		// see if it needs to be done
		if(verbose) System.out.println("\ntrying " + outFile.getName() + " . . .");
		if(outFile.exists() || tempFile.exists()) 
		{
			if(verbose) System.out.println("already busy or done.");
			return; //experiment is already done, or already running
		}
		if(verbose) System.out.println("still has to be done. running . . .");
		
		tempFile.deleteOnExit(); //remove the marker even if this run crashes
		
		// FIX: try-with-resources so the temp writer is closed on every exit path
		// (it previously leaked if any exception was thrown during training)
		try(FileWriter writer = new FileWriter(tempFile))
		{
			//get the networks ready (old_file is only read when continue_run != null)
			String old_file = continue_run + "_" + repeat + "_nets.m";
			initNetworks(old_file);
			
			//get players ready
			Player player_white = getPlayer(train_player_1, Field.WHITE);
			Player player_black = getPlayer(train_player_2, Field.BLACK);
			
			//we are going to train, so enable exploration for minimaxplayers:
			player_white.setExploration(true);
			player_black.setExploration(true);
			
			//get board ready
			OthelloBoard board = new OthelloBoard();
			board.setInputRepresentation(input_representation);
			board.setSymmetryRemoval(symmetry_removal);
			
			//GO:
			for (epoch = 0; epoch < training_games; epoch++) 
			{	
				// exponentially decayed learning rate: depends on epoch
				for(int n = 0; n < networks; n++)
				{
					utilityFunctions_white[n].setLearningRate(alpha * Math.exp(-epoch / (tau * training_games)));
					utilityFunctions_black[n].setLearningRate(alpha * Math.exp(-epoch / (tau * training_games)));
				}
				
				// initialize board with random opening moves: TODO: should network learn from this?
				board.reset();
				for (int i = 0; i < initial_moves; i++) 
				{
					initializer.move(board, Field.WHITE);
					initializer.move(board, Field.BLACK);
				}
	
				// play one training game; TD weight updates are collected as it goes
				double[] prevStateWhite = null;
				boolean p1 = true, p2 = true;
				while( p1 || p2 ) //game is over once neither player could move
				{
					//TODO: rewrite the retrieval and calling of these networks
					//      maybe use all networks at once with some weighting: 
					//                    V(s(t)) ~ exp(-|t - t1|)*V1(s) + ... + exp(-|t - tn|)*Vn(s)
					
					// get the networks for this phase of the game
					// index should be the same as in NeuralNetworkPlayer!
					int networkIdx_white = board.getTime() * utilityFunctions_white.length / 61;
					MultiLayerPerceptron utility_white = utilityFunctions_white[networkIdx_white];
					
					//get black network; could be the same as white
					int networkIdx_black = board.getTime() * utilityFunctions_black.length / 61;
					MultiLayerPerceptron utility_black = utilityFunctions_black[networkIdx_black];
					
					//save the state from black's perspective before white changes the board
					double[] prevStateBlack = board.toArray(Field.BLACK);
					
					//white: go to next state
					p1 = player_white.move(board, Field.WHITE);
	
					/* end game weight changes: train both sides towards the final reward */
					if (board.isFinalState()) 
					{
						utility_white.collectWeight(board.toArray(Field.WHITE), gamma * board.getReward(Field.WHITE));
						utility_black.collectWeight(prevStateBlack,             gamma * board.getReward(Field.BLACK));
						break; //quit this game
					}
					
					/* mid game weight changes: train towards the bootstrapped next-state value */
					double[] currentStateWhite = board.toArray(Field.WHITE);
					if(prevStateWhite != null) //null only on first move				
						utility_white.collectWeight(prevStateWhite, gamma * utility_white.process(currentStateWhite)[0]);
					
					//save state
					prevStateWhite = currentStateWhite;
					
					//black: go to next state
					p2 = player_black.move(board, Field.BLACK);
					
					/* end game weight changes */
					if (board.isFinalState()) 
					{
						utility_white.collectWeight(prevStateWhite,             gamma * board.getReward(Field.WHITE));
						utility_black.collectWeight(board.toArray(Field.BLACK), gamma * board.getReward(Field.BLACK));
						break; //quit this game
					}
					
					/* mid game weight changes */
					double[] currentStateBlack = board.toArray(Field.BLACK);
					utility_black.collectWeight(prevStateBlack, gamma * utility_black.process(currentStateBlack)[0]);
				}
				
				/* end of game: */
				
				//periodic learning: apply the batched weight updates
				if(epoch % batch_size == 0)
				{
					for(int i = 0; i < utilityFunctions_white.length; i++)
						utilityFunctions_white[i].applyWeights(); //also sets delta_weight's to 0
					
					if(!use_single_utility) //shared networks were already applied above
						for(int i = 0; i < utilityFunctions_black.length; i++)
							utilityFunctions_black[i].applyWeights();
				}
				
				//periodic testing
				if (epoch % test_interval == 0) 
				{
					if(verbose) System.out.print(((int)(1000.0*epoch / training_games))/10.0 + "% "); 
					
					//test against all test players
					for(int p = 0; p < test_players.length; p++)
					{
						//get players ready
						Player test_white = getPlayer("NeuralNetwork", Field.WHITE);
						Player test_black = getPlayer(test_players[p], Field.BLACK);				
						
						//we are going to test, so disable exploration for minimaxplayers:
						test_white.setExploration(false);
						test_black.setExploration(false);
						
						//play the games and switch starting positions
						int[] white_vs_black = testPlayers(test_white,test_black, testing_games/2);
						int[] black_vs_white = testPlayers(test_black,test_white, testing_games/2);
						
						//and reset after testing
						test_white.setExploration(true);
						test_black.setExploration(true);
						
						//total the scores: wins with either color count for the same player
						int w = white_vs_black[Field.WHITE] + black_vs_white[Field.BLACK];
						int b = white_vs_black[Field.BLACK] + black_vs_white[Field.WHITE];
						int e = white_vs_black[Field.EMPTY] + black_vs_white[Field.EMPTY];
						
						//write to temp-file
						writer.write(w + " " + b + " " + e + " ");
						if(verbose) System.out.print(w + " " + b + " " + e + ",   ");
					}
					//finalize line
					writer.write("\n");
					if(verbose) System.out.println("alpha = " + alpha * Math.exp(-epoch / (tau*training_games)));
				}
			}
		}
		
		/* end of experiment: */
		
		//the writer is closed here; promote the temp file to the real result file
		tempFile.renameTo(outFile); 
		//tempFile is automatically deleted on exit
		
		//write networks to file (FIX: try-with-resources instead of a manual close
		//that leaked the writer on exception)
		try(FileWriter networkWriter = new FileWriter(new File(networkFile)))
		{
			networkWriter.write(utilityFunctions_white.length + "\n");
			for(MultiLayerPerceptron p : utilityFunctions_white)
				networkWriter.write(p.toString() + "\n");
			
			if(!use_single_utility) //shared networks only need to be written once
			{
				networkWriter.write(utilityFunctions_black.length + "\n");
				for(MultiLayerPerceptron p : utilityFunctions_black)
					networkWriter.write(p.toString() + "\n");
			}
		}
	}
	
	/**
	 * Plays {@code nGames} games between the given players, each starting with
	 * 4 random opening moves per player, and tallies the outcomes.
	 *
	 * @return win counts indexed by Field.WHITE / Field.BLACK / Field.EMPTY
	 *         (the EMPTY slot counts draws, via board.bestPlayer())
	 */
	private int[] testPlayers(Player player_white, Player player_black, int nGames)
	{
		//wins for white, black or nobody
		int[] wins = new int[3];
		
		OthelloBoard board = new OthelloBoard();
		board.setInputRepresentation(input_representation);
		board.setSymmetryRemoval(symmetry_removal);
		
		for(int trial = 0; trial < nGames; trial++)
        {
			//initialize:
        	board.reset();
        	for(int i = 0; i < 4; i++)
        	{// fixed on 4 initial random moves per player
        		initializer.move(board, Field.WHITE);
        		initializer.move(board, Field.BLACK);
        	}
        	//play the game until neither player can move
	        boolean whiteCouldMove = true, blackCouldMove = true;
	        while(whiteCouldMove || blackCouldMove)
	        {
	        	whiteCouldMove = player_white.move(board, Field.WHITE);
	        	blackCouldMove = player_black.move(board, Field.BLACK);
	        }
	        wins[board.bestPlayer()]++;
        }
		return wins;
	}
	
	/**
	 * (Re)creates the utility networks. When {@code continue_run} is set the
	 * white networks are read from {@code networkFile}; otherwise fresh
	 * perceptrons are built from {@code setup}. Black either shares white's
	 * networks ({@code use_single_utility}) or gets its own fresh set.
	 *
	 * NOTE(review): when continuing a run, alpha is decayed once per call and
	 * this method is called once per repeat, so the decay compounds across
	 * repeats — verify that this is intended.
	 *
	 * @param networkFile file to read old networks from (continue_run only)
	 * @throws FileNotFoundException if {@code networkFile} cannot be opened
	 */
	private void initNetworks(String networkFile) throws FileNotFoundException
	{
		//initialize utility functions for white:
		if(continue_run != null)
		{ 
			//use network from file as starting point
			if(verbose) System.out.println("Reading old networks for white.");
			alpha = alpha*Math.exp(-1/tau);        //set alpha back to where it was
			utilityFunctions_white = readNetworks(networkFile); //read network from file
		}
		else{
			// start new networks
			if(verbose) System.out.println("Making brand new networks for white.");
			utilityFunctions_white = new MultiLayerPerceptron[networks];
			for(int i = 0; i < utilityFunctions_white.length; i++)
			{
				utilityFunctions_white[i] = new MultiLayerPerceptron(setup);
				utilityFunctions_white[i].setDiscounting(gamma);
				utilityFunctions_white[i].setEligibility(lambda);
				utilityFunctions_white[i].setLearningRate(alpha);
				utilityFunctions_white[i].setOutputFunction(Type.TANH);
			}
		}
		
		// initialize utility functions for black:
		if(use_single_utility)
		{
			if(verbose) System.out.println("Making black networks the same as white.");
			utilityFunctions_black = utilityFunctions_white;
		} else
		{
			if(verbose) System.out.println("Making brand new networks for black.");
			utilityFunctions_black = new MultiLayerPerceptron[networks];		
			for(int i = 0; i < utilityFunctions_black.length; i++)
			{
				utilityFunctions_black[i] = new MultiLayerPerceptron(setup);
				utilityFunctions_black[i].setDiscounting(gamma);
				utilityFunctions_black[i].setEligibility(lambda);
				utilityFunctions_black[i].setLearningRate(alpha);
				utilityFunctions_black[i].setOutputFunction(Type.TANH);
			}	
		}
	}
	
	/**
	 * Reads an array of networks from a file in the format written by
	 * {@link #runExperiment}: a count followed by one serialized network each.
	 *
	 * @param file path of the network file
	 * @return the loaded networks, configured with the current hyperparameters
	 * @throws FileNotFoundException if {@code file} cannot be opened
	 */
	public MultiLayerPerceptron[] readNetworks(String file) throws FileNotFoundException 
	{
		// read network from file
		if(verbose) System.out.print("trying to read networks from " + file + " . . . ");
		// FIX: try-with-resources; the scanner previously leaked if parsing threw
		try(Scanner sc = new Scanner(new File(file)))
		{
			int nNets = sc.nextInt();
	
			//multiple neural nets for each episode of a game
			MultiLayerPerceptron[] nets = new MultiLayerPerceptron[nNets];
			for (int n = 0; n < nNets; n++)
			{
				nets[n] = new MultiLayerPerceptron(sc);
				nets[n].setDiscounting(gamma);
				nets[n].setEligibility(lambda);
				nets[n].setLearningRate(alpha);
				nets[n].setOutputFunction(Type.TANH);
			}
	
			if(verbose) System.out.println("succesfully loaded " + nNets + " networks.");
			return nets;
		}
	}
	
	/**
	 * Parses a "name : value" configuration file; '#' starts a comment.
	 * See experiments/example.in for an example input file.
	 *
	 * @throws FileNotFoundException if {@code inputFile} cannot be opened
	 */
	private void readExperiment(String inputFile) throws FileNotFoundException
	{
		// FIX: try-with-resources; the scanner was never closed before
		try(Scanner in = new Scanner(new File(inputFile)))
		{
			while (in.hasNextLine()) {
				// remove comments (#) and get information (:)
				// info is <var_name, var_value> pair
				String[] info = in.nextLine().split("#")[0].split(":");
	
				if (info.length > 1) {
					setData(strip(info[0]), strip(info[1]));
				}
			}
		}
	}
	
	/**
	 * Assigns one configuration value to the field named {@code name}.
	 * Exits the program on an unknown name.
	 */
	private void setData(String name, String value)
	{
		if(name.equals("verbose"))
			verbose = strip(value).equals("true");			
		
		else if(name.equals("networks"))
			networks = Integer.parseInt(value);
		
		else if(name.equals("use_single_utility"))
			use_single_utility = value.equals("true");
		
		else if(name.equals("setup"))
		{
			// comma-separated layer sizes, e.g. "64,32,1"
			String[] setupS = value.split(",");
			setup = new int[setupS.length];
			for(int i = 0; i < setup.length; i++)
				setup[i] = Integer.parseInt(setupS[i]);
		}
		
		else if(name.equals("exploration_prob"))
			exploration_prob = Double.parseDouble(value);
		
		else if(name.equals("continue_run"))
			continue_run = value;
		
		else if(name.equals("repeats"))
			repeats = Integer.parseInt(value);
		
		else if(name.equals("tau"))
			tau = Double.parseDouble(value); 
		
		else if(name.equals("alpha"))
			alpha = Double.parseDouble(value);
		else if(name.equals("gamma"))
			gamma = Double.parseDouble(value);
		else if(name.equals("lambda"))
			lambda = Double.parseDouble(value);
		
		else if(name.equals("initial_moves"))
			initial_moves = Integer.parseInt(value);
		
		else if(name.equals("training_games"))
			training_games = Integer.parseInt(value);
		else if(name.equals("batch_size"))
			batch_size = Integer.parseInt(value);
		
		else if(name.equals("testing_games"))
			testing_games = Integer.parseInt(value);
		else if(name.equals("test_interval"))
			test_interval = Integer.parseInt(value);
		
		else if(name.equals("train_player_1"))
			train_player_1 = value;
		else if(name.equals("train_player_2"))
			train_player_2 = value;
		
		else if(name.equals("test_players"))
			test_players = value.split(",");
		
		else if(name.equals("symmetry_removal"))
			symmetry_removal = value.equals("true");
		else if(name.equals("input_representation"))
			input_representation = Integer.parseInt(value);
		else //ERROR
		{
			System.out.println("ERROR: There is something wrong in the input file with " + name + ":" + value + ". Exiting program.");
			System.exit(1); // FIX: nonzero status so callers/scripts see the failure
		}
		if(verbose) System.out.println(name + " = " + value + " . . . ok");
	}
	
	/**
	 * Creates a player from its configuration name. "NeuralNetwork" players
	 * wrap the utility functions of the given color; "Minimax,&lt;base&gt;,&lt;depth&gt;"
	 * wraps another player in a MinimaxPlayer. Exits on an unknown name.
	 *
	 * NOTE(review): test_players is split on ',' in setData, so a Minimax
	 * entry in test_players would be broken apart — verify the config format.
	 */
	private Player getPlayer(String str, byte color)
	{
		if(str.equals("Random"))
			return new RandomPlayer();
		if(str.equals("Positional"))
			return new PositionalPlayer();
		if(str.equals("Mobility"))
			return new MobilityPlayer();
		if(str.equals("NeuralNetwork"))
		{
			if(color == Field.WHITE)
				return new NeuralNetworkPlayer(utilityFunctions_white, this);
			if(color == Field.BLACK)
				return new NeuralNetworkPlayer(utilityFunctions_black, this);
		}
		if(str.startsWith("Minimax"))
		{
			String[] playerInfo = str.split(",");
			MinimaxPlayer player = new MinimaxPlayer(getPlayer(playerInfo[1], color), Integer.parseInt(playerInfo[2]));
			return player;
		}
		//ERROR:
		System.out.println("ERROR: There is something wrong with player " + str);
		System.exit(1); // FIX: nonzero status so callers/scripts see the failure
		return null;
	}
	
	/**
	 * Returns the decay factor exp(-epoch / (tau * training_games)) for the
	 * current epoch; the minimax players use it as their probability of
	 * exploring during training.
	 */
	public double getTau() {
		return Math.exp(-epoch / (tau*training_games));
	}
	
	/** Returns {@code str} with all spaces removed. */
	private String strip(String str)
	{
		// FIX: String.replace instead of O(n^2) char-by-char concatenation
		return str.replace(" ", "");
	}
	
	/** Entry point: runs the experiment described by the given ".in" file. */
	public static void main(String[] args) throws IOException 
	{
		if(args.length < 1) // FIX: clear usage message instead of ArrayIndexOutOfBoundsException
		{
			System.out.println("usage: ExperimentRunner <experiment.in>");
			System.exit(1);
		}
		new ExperimentRunner(args[0]);
	}

}
