package players;

import java.util.ArrayList;
import java.util.Random;

import experiments.ExperimentRunner;

import neuralnetworks.NeuralNetwork;

import model.OthelloBoard;
import model.Position;

/**
 * Othello player that scores candidate moves with one or more neural
 * networks and either plays greedily (best score) or, when exploration is
 * enabled, samples a move via Boltzmann (softmax) roulette-wheel selection.
 */
public class NeuralNetworkPlayer implements Player
{
    // Ensemble of evaluation networks; evaluate() blends their outputs with a
    // Gaussian weight centred on each network's share of the game timeline.
    NeuralNetwork[] d_neuralnetwork;
	boolean d_doExploration;

	// Experiment runner supplies the current Boltzmann temperature (tau).
	ExperimentRunner d_expRun;
	
	static Random r = new Random(); // randomness for the exploration roulette wheel
	
    /**
     * Creates a player backed by a single network.
     *
     * @param neuralnetwork the evaluation network
     * @param expRun        source of the current tau for exploration
     */
    public NeuralNetworkPlayer(NeuralNetwork neuralnetwork, ExperimentRunner expRun) 
    {
    	d_neuralnetwork = new NeuralNetwork[1];
        d_neuralnetwork[0] = neuralnetwork;
		d_expRun = expRun;
		d_doExploration = false; // exploration has its own setter
    }
    
    /**
     * Creates a player backed by an ensemble of networks.
     *
     * @param neuralnetworks the evaluation networks (one per game phase)
     * @param expRun         source of the current tau for exploration
     */
    public NeuralNetworkPlayer(NeuralNetwork[] neuralnetworks, ExperimentRunner expRun) 
    {
        d_neuralnetwork = neuralnetworks;
		d_expRun = expRun;
		d_doExploration = false; // exploration has its own setter
    }
    
    /**
     * Selects and performs a move for {@code color} on {@code board}.
     * Without exploration the highest-scoring move is played; with
     * exploration a move is sampled with probability proportional to
     * exp(score / tau).
     *
     * @param board the board to move on (mutated by the chosen transition)
     * @param color the color to move
     * @return {@code false} if there is no legal move (pass), else {@code true}
     */
    public boolean move(OthelloBoard board, byte color) 
    {
        // get all possible transitions
        ArrayList<Position> moves = board.getTransitions(color);
        if (moves.isEmpty())
        {
            return false;
        }      
        
        // Greedy pass: evaluate every successor state, remember the best.
        // NEGATIVE_INFINITY (not -10.0) so the first real score always wins;
        // the old -10.0 sentinel left chosenMove at -1 (and crashed on
        // moves.get(-1)) whenever every score was <= -10.
        double bestScore = Double.NEGATIVE_INFINITY;
        int chosenIndex = 0; // default to a legal move even if all scores are NaN
        double[] allScores = new double[moves.size()]; // kept for roulette-wheel selection
		
        for(int loop = 0; loop < moves.size(); loop++)
        {
        	// apply transition nr 'loop' on a copy of the board
            OthelloBoard newState = board.getClone();
            newState.makeTransition(moves.get(loop), color);
            
            // evaluate the resulting state
            double score = evaluate(newState, color);
            
            allScores[loop] = score;
            if(score > bestScore)
            {
                bestScore = score;
                chosenIndex = loop;
            }
        }
        
		if (d_doExploration) 
		{
			// Boltzmann exploration: weight each move by exp(score / tau) and
			// sample via roulette wheel. (Computed only when exploring; the
			// original did this unconditionally and threw the result away.
			// NOTE(review): assumes getTau() is a side-effect-free getter.)
			double totalFitness = 0;
			for(int loop = 0; loop < moves.size(); ++loop ) 
			{
				allScores[loop] = Math.exp(allScores[loop] / d_expRun.getTau() );
				totalFitness += allScores[loop]; // wheel circumference
			}
			
			// spin the wheel and find the slice it lands in
			double wheel_position = r.nextDouble() * totalFitness;
			double probCounter = 0.0;
			for(int loop = 0; loop < moves.size(); ++loop ) 
			{
				probCounter += allScores[loop];
				if (wheel_position < probCounter) 
				{ // this is the sampled move
					chosenIndex = loop;
					break; // stop here, otherwise the last move always wins
				}
			}
		}

        // perform the chosen transition on the real board
        board.makeTransition(moves.get(chosenIndex), color);
        return true;
    }
    
    /**
     * Evaluates a board state for {@code color} by blending the outputs of
     * all networks. Network n is weighted by a Gaussian centred at game time
     * (n + 0.5) * 64 / networks, so each network dominates its own phase of
     * the game.
     *
     * @param board the state to evaluate
     * @param color the color from whose perspective to evaluate
     * @return the weighted sum of the networks' first outputs
     */
    public double evaluate(OthelloBoard board, byte color)
    {
    	double score = 0.0;
    	for(int n = 0; n < d_neuralnetwork.length; n++)
	    {
	        double [] input  = board.toArray(color);
	        double [] output = d_neuralnetwork[n].process(input);
	        
	        // distance from this network's centre point on the 64-move timeline
	        double d = Math.abs(board.getTime() - (n + 0.5)*64/d_neuralnetwork.length);
	        double weight = Math.exp(-d*d / 128); // Gaussian falloff
	        score += weight * output[0];
    	}
        return score;
    }
    
    @Override
    public String toString()
	{
		return "NeuralNetwork";
	}
    
    /** Enables or disables Boltzmann exploration during move selection. */
	public void setExploration(boolean aan) 
	{
		d_doExploration = aan;
	}
}
