package archive;

import model.Field;
import model.Game;
import model.OthelloBoard;
import neuralnetworks.MultiLayerPerceptron;
import players.NeuralNetworkPlayer;
import players.Player;
import players.RandomPlayer;

/**
 * Self-play training experiment for Othello: a single {@link MultiLayerPerceptron}
 * is used as a shared board-evaluation ("utility") function for both colors and is
 * updated online during self-play, in a temporal-difference style — each previous
 * position is trained toward the network's own valuation of the position that
 * followed it, and toward the real reward at game end. Progress is measured
 * periodically (and at the end) by playing the trained network against a
 * {@link RandomPlayer}.
 */
public class ExperimentJaap implements Experiment
{

    // NOTE(review): of these six "parameters", only d_learningRuns and
    // d_testingRuns are actually read in this class. d_trainingSetSize,
    // d_learningRate, d_lambdaValue and d_gammaValue are declared (with
    // explanatory comments) but never used — confirm whether they were meant
    // to be passed to the MultiLayerPerceptron or applied in the train(...)
    // calls below (e.g. discounting the bootstrapped target by d_gammaValue).
    int     d_trainingSetSize = 10, //training-set size; nr of paths in the tree
            d_learningRuns    = 10000,    //each training-example is used learningRuns/setSize times
            d_testingRuns     = 1000;    //testing runs; more runs gives more accurate result

    double  d_learningRate    = 0.67, //how serious to take new information
            d_lambdaValue     = 0.0,  //past adjustments strength
            d_gammaValue      = 0.95;  //future utility strength

    /**
     * Runs the whole experiment: d_learningRuns self-play games with online
     * training, a 100-game benchmark against a RandomPlayer every 100 epochs
     * (printed as a crude text bar), and a final d_testingRuns-game benchmark
     * whose per-color win counts are printed at the end.
     */
    public void run()
    {
        //temporary data storage: one 64-value board encoding per round, from
        //each color's own perspective. 60 rows = presumably an upper bound on
        //the number of rounds in an 8x8 Othello game — TODO confirm this can
        //never be exceeded (an overflow here would throw
        //ArrayIndexOutOfBoundsException at inputW[t]/inputB[t]).
        double[][] inputW = new double[60][64];
        double[][] inputB = new double[60][64];
        
        //the neural net to train: 64 inputs (one per board square), one hidden
        //layer of 64 units, a single output (the position's utility)
        int[] setup = {64,64,1};
        MultiLayerPerceptron utility = new MultiLayerPerceptron(setup);
        
        // players for each side — both wrap the SAME network, so every training
        // update affects both sides of the self-play
        Player player1 = new NeuralNetworkPlayer(utility,null); 
        Player player2 = new NeuralNetworkPlayer(utility,null);
        
        OthelloBoard board = new OthelloBoard();
        for (int epoch = 0; epoch < d_learningRuns; epoch++) 
        {
            if (epoch % 100 == 0) 
            {//periodic testing: 100 games vs. a random player; the leading
             //spaces draw a bar whose length is half the white win count
                int[] wins = testPlayers(new NeuralNetworkPlayer(utility,null), new RandomPlayer(), 100);
                for (int i = 0; 2*i < wins[Field.WHITE]; i++)
                    System.out.print(" ");
                System.out.print("|" + wins[Field.WHITE]);
                System.out.println(" @ " + (100.0 * epoch) / d_learningRuns + "%");
            }

            // play one game
            board.reset();
            for (int i = 0; i < 3; i++) 
            {//random initialization: 3 random moves per side (6 plies) so the
             //self-play games start from varied positions
                new RandomPlayer().move(board, Field.WHITE);
                new RandomPlayer().move(board, Field.BLACK);
            }
            //p1/p2 record whether each side could still move; the game ends
            //when neither side has a legal move
            boolean p1 = true, p2 = true;
            for (int t = 0; p1 || p2; t++) 
            {
                //white: go to next state
                p1 = player1.move(board, Field.WHITE);
                //save the state (from white's perspective) — note this is
                //recorded even when white had to pass
                inputW[t] = board.toArray(Field.WHITE);

                //black: go to next state
                p2 = player2.move(board, Field.BLACK);
                //save the state (from black's perspective)
                inputB[t] = board.toArray(Field.BLACK);

                if (!p1 && !p2) 
                {// game over: train each side's previous position toward the
                 // actual reward of the finished game (the terminal TD target)
                    utility.train(inputW[t - 1], board.getReward(Field.WHITE));
                    utility.train(inputB[t - 1], board.getReward(Field.BLACK));
                    // utility.train(Arrays.copyOf(inputW,t+1), Arrays.copyOf(targetW,t+1));
                    // utility.train(Arrays.copyOf(inputB,t+1), Arrays.copyOf(targetB,t+1));
                } else if (t > 0) 
                {
                    //bootstrapped update: train each color's previous position
                    //toward the net's valuation of the OTHER color's newest
                    //position. NOTE(review): the target is not discounted —
                    //d_gammaValue is never applied; confirm this is intended.
                    utility.train(inputW[t - 1], utility.process(inputB[t]));
                    utility.train(inputB[t - 1], utility.process(inputW[t]));
                }
            }
        }
        
        //final benchmark: trained network (white) vs. random player (black)
        int[] result = testPlayers(new NeuralNetworkPlayer(utility,null), new RandomPlayer(), d_testingRuns);
        
        System.out.println();
        for(byte color : Field.values())
            System.out.println(Field.toString(color) + ": " + result[color] + " wins");
        
    }
    
    /**
     * Plays nrGames games between the two players and tallies the winners.
     *
     * @param whitePlayer player moving as Field.WHITE
     * @param blackPlayer player moving as Field.BLACK
     * @param nrGames     number of games to play
     * @return win counts indexed by the winner value from Game.getWinner()
     *         (size 3 — presumably white / black / draw-or-empty; the Field
     *         color constants must therefore be in [0,2] — TODO confirm)
     */
    public int[] testPlayers(Player whitePlayer, Player blackPlayer, int nrGames)
    {
        int[] wins = new int[3];
        
        Game[] games = generateGames(whitePlayer, blackPlayer, nrGames);
        for(Game game : games)
            wins[game.getWinner()]++;
        
        return wins;
    }
    
    /**
     * Plays nrGames complete games between the given players, each starting
     * from a random 6-ply opening, and records every visited position, the
     * winner and the final rewards in a Game object.
     *
     * @param whitePlayer player moving as Field.WHITE
     * @param blackPlayer player moving as Field.BLACK
     * @param nrGames     number of games to generate
     * @return the recorded games (also usable as a training set)
     */
    public Game[] generateGames(Player whitePlayer, Player blackPlayer, int nrGames)
    {
        OthelloBoard board = new OthelloBoard();
        RandomPlayer rand = new RandomPlayer();
        
        Game[] trainingSet = new Game[nrGames];
        for(int trial = 0; trial < nrGames; trial++)
        {
            board.reset();
            
            //pick a random state from the tree at 6-ply depth
            Game game = new Game();
            for(int i = 0; i < 3; i++)
            {
                //each board is recorded from the perspective of (and just
                //before the move of) the given color
                game.addBoard(board.toArray(Field.WHITE), Field.WHITE);
                rand.move(board, Field.WHITE);
                game.addBoard(board.toArray(Field.BLACK), Field.BLACK);
                rand.move(board, Field.BLACK);
            }
            
            //play the game until neither side has a legal move
            //NOTE(review): a board is recorded even on a turn where the player
            //has to pass, so consecutive duplicate states can appear — confirm
            //downstream consumers of Game tolerate this.
            boolean whiteCouldMove = true, blackCouldMove = true;
            while(whiteCouldMove || blackCouldMove)
            {
                game.addBoard(board.toArray(Field.WHITE), Field.WHITE);
                whiteCouldMove = whitePlayer.move(board, Field.WHITE);
                game.addBoard(board.toArray(Field.BLACK), Field.BLACK);
                blackCouldMove = blackPlayer.move(board, Field.BLACK);
            }
            game.setWinner(board.bestPlayer());
            
            game.setReward(board.getReward(Field.WHITE), Field.WHITE); //TODO: maybe store 2 rewards?
            game.setReward(board.getReward(Field.BLACK), Field.BLACK);
            
            trainingSet[trial] = game;
        }
        return trainingSet;
    }



}
