package othello.players.learnplayers;

import static java.lang.Math.exp;

import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
import java.util.Random;
import java.awt.Color;

import othello.model.Action;
import othello.model.IBoard;
import othello.model.Move;
import othello.model.State;
import othello.neuralnetwork.NeuralNetwork;

// Base class for Q-Learning and SARSA players: the two algorithms share almost
// all of their action-selection and network-update machinery and differ only
// in how subclasses drive beginTurn()/endTurn().
public abstract class AbstractActionPlayer extends AbstractNeuralNetworkPlayer {
    /** Discount factor (gamma) applied to bootstrapped targets. 1 = undiscounted. */
    private static final double DISCOUNT_RATE = 1;
    /** Per-step decay (lambda) used by the eligibility-trace updates. */
    private static final double LAMBDA_DECAY = 0.75;
    /** Base of the softmax temperature schedule (t-values taken from a paper). */
    private static final double T_BASE = 0.9999995;
    /** Below this temperature softmax selection degenerates to pure greedy. */
    private static final double T_THRESHOLD = 0.002;
    /** Initial exploration rate; decays linearly to 0 over the training run. */
    private static final double EPS_INIT = 0.1;
    private double eps = EPS_INIT;
    // Exploration-strategy switches; exactly one is expected to be true.
    private final boolean softmax = false;
    private final boolean epsgreedy = true;
    /** Moves played in the current game, recorded for eligibility traces. */
    private List<Move> previousMoves = new ArrayList<Move>();
    /** Single shared RNG instead of a fresh Random per decision. */
    private final Random generator = new Random();
    protected Action lastAction;
    protected State lastState;

    // Static positional weights (corners valuable, X-/C-squares dangerous).
    private static final int[][] positionValues =
        new int[][]{{100, -20, 10,  5,  5, 10, -20, 100},
                    {-20, -50, -2, -2, -2, -2, -50, -20},
                    { 10,  -2, -1, -1, -1, -1,  -2,  10},
                    {  5,  -2, -1, -1, -1, -1,  -2,   5},
                    {  5,  -2, -1, -1, -1, -1,  -2,   5},
                    { 10,  -2, -1, -1, -1, -1,  -2,  10},
                    {-20, -50, -2, -2, -2, -2, -50, -20},
                    {100, -20, 10,  5,  5, 10, -20, 100}};

    // Positional weights used by the benchmark opponent.
    private static final int[][] benchValues =
        new int[][]{{ 80, -26,  24,  -1,  -5,  28, -18,  76},
                    {-23, -39, -18,  -9,  -6,  -8, -39,  -1},
                    { 46, -16,   4,   1,  -3,   6, -20,  52},
                    {-13,  -5,   2,  -1,   4,   3, -12,  -2},
                    { -5,  -6,   1,  -2,  -3,   0,  -9,  -5},
                    { 48, -13,  12,   5,   0,   5, -24,  41},
                    {-27, -53, -11,  -1, -11, -16, -58, -15},
                    { 87, -25,  27,  -1,   5,  36,  -3, 100}};

    // Alternative hand-tuned heuristic weights.
    private static final int[][] heurValues =
        new int[][]{{100,-25, 10,  5,  5, 10,-25,100},
                    {-25,-25,  2,  2,  2,  2,-25,-25},
                    { 10,  2,  5,  1,  1,  5,  2, 10},
                    {  5,  2,  1,  2,  2,  1,  2,  5},
                    {  5,  2,  1,  2,  2,  1,  2,  5},
                    { 10,  2,  5,  1,  1,  5,  2, 10},
                    {-25,-25,  2,  2,  2,  2,-25,-25},
                    {100,-25, 10,  5,  5, 10,-25,100}};

    public AbstractActionPlayer(String name, NeuralNetwork neuralNetwork) {
        super(name, neuralNetwork);
    }

    /**
     * Final learning step of a game: trains the network towards the terminal
     * reward, then resets the per-game bookkeeping.
     */
    @Override
    public void endGame() {
        if (useEligibility) {
            // TD(lambda): walk the game backwards, updating every recorded
            // move towards the terminal reward decayed by lambda per step.
            double lambda = 1;
            ListIterator<Move> li = previousMoves.listIterator(previousMoves.size());
            double newQ = reward(board);

            while (li.hasPrevious()) {
                Move move = li.previous();
                Action prevAction = move.getAction();
                double prevQ = prevAction.getQValue();
                double Qtarget = DISCOUNT_RATE * newQ * lambda;
                // BUGFIX: train the state/action pair the iterator is at; the
                // old code passed lastAction/lastState on every iteration, so
                // only the final move was ever (repeatedly) updated.
                // NOTE(review): assumes Move exposes getState(), matching its
                // (state, action) construction — confirm against Move.
                learn(Qtarget, prevQ, prevAction, move.getState());
                lambda *= LAMBDA_DECAY;
            }
        } else if (lastAction != null) {
            // Plain one-step update towards the terminal reward.
            double lastQ = lastAction.getQValue();
            double newQ = reward(board);
            double Qtarget = DISCOUNT_RATE * newQ;
            learn(Qtarget, lastQ, lastAction, lastState);
        }

        lastAction = null;
        lastState = null;

        // Clear the recorded moves for the next game's eligibility trace.
        this.previousMoves.clear();
    }

    /**
     * Chooses an action from the network output according to the active
     * policy: pure greedy while benchmarking/playing (optionally randomized
     * in the opening), otherwise epsilon-greedy or softmax exploration.
     *
     * @param output network Q-value estimates, indexed by actionIndex().
     * @param brd    board to generate legal actions from.
     * @return the selected action, or null when no legal action exists.
     */
    protected Action selectAction(double[] output, IBoard brd){
        List<Action> availableActions = getAvailableActions(brd);

        if (availableActions.isEmpty()){
            return null;
        }

        Action bestAction = null;
        if(this.benchmarkState == BenchmarkState.Benchmarking || this.benchmarkState == BenchmarkState.Playing){
            if (board.getTurns() < 8 && board.getRandFirstFourMoves()){
                // Randomized opening (first four moves per player) for variety.
                bestAction = availableActions.get(generator.nextInt(availableActions.size()));
            } else {
                bestAction = getMaxMove(availableActions, output);
            }
        } else if(epsgreedy){
            bestAction = getEpsGreedyMove(availableActions, output);
        } else if (softmax) {
            bestAction = getSoftMaxMove(availableActions, output);
        }

        return bestAction;
    }

    /**
     * Greedy one-ply search over the static position weights; switches to a
     * disc-count evaluation once the endgame has been reached.
     */
    protected Action selectActionPos()
    {
        if (isRandomOpening())
            return selectRandomAction();

        return bestEvaluatedAction(checkEndGame(board), positionValues);
    }

    /**
     * Greedy one-ply search using the supplied weight table (no endgame
     * special case).
     *
     * @param values positional weight table, indexed [row][col].
     */
    protected Action selectActionPos(int[][] values)
    {
        if (isRandomOpening())
            return selectRandomAction();

        return bestEvaluatedAction(false, values);
    }

    /** True while the randomized benchmark opening phase is in effect. */
    private boolean isRandomOpening() {
        return board.getTurns() < 8
            && board.getBenchmarkstate() == BenchmarkState.Benchmarking
            && board.getRandFirstFourMoves();
    }

    /**
     * Tries every legal move and returns the one whose hypothetical resulting
     * board evaluates highest.
     *
     * @param useEndgameEval evaluate by disc difference instead of weights.
     * @param values         weight table for the non-endgame evaluation.
     * @return the best action, or null when there is no legal move.
     */
    private Action bestEvaluatedAction(boolean useEndgameEval, int[][] values) {
        int maxEval = Integer.MIN_VALUE;
        int bestR = -1;
        int bestC = -1;

        for (int r = 0; r < board.getRowCount(); r++) {
            for (int c = 0; c < board.getColCount(); c++) {
                if (board.isValidMove(getColor(), r, c)) {
                    IBoard hypoBrd = board.hypotheticallyPlacePiece(this.getColor(), r, c);
                    int eval = useEndgameEval ? evalEndgame(hypoBrd) : evalWeighted(hypoBrd, values);
                    if (eval > maxEval) {
                        maxEval = eval;
                        bestR = r;
                        bestC = c;
                    }
                }
            }
        }

        if (bestR == -1) {
            return null;
        }
        return new Action(bestR, bestC);
    }

    /** Greedy one-ply search over the hand-tuned heuristic weights. */
    protected Action selectActionHeur()
    {
        return selectActionPos(heurValues);
    }

    /** Greedy one-ply search over the benchmark-opponent weights. */
    protected Action selectActionBench()
    {
        return selectActionPos(benchValues);
    }

    /** Returns a uniformly random legal action, or null when none exist. */
    protected Action selectRandomAction() {
        List<Action> availableActions = getAvailableActions(board);
        if (availableActions.isEmpty()) {
            return null;
        }
        return availableActions.get(generator.nextInt(availableActions.size()));
    }

    /**
     * Epsilon-greedy selection: with probability eps (decaying linearly over
     * the training run) a uniformly random action, otherwise the greedy one.
     * The chosen action's Q-value is cached on the action via setQValue().
     */
    protected Action getEpsGreedyMove(List<Action> availableActions, double[] output) {
        // Linear decay: EPS_INIT at the first training game, 0 at the last.
        eps = EPS_INIT * (double)(board.getGamesLeft()) / (double)totalGames;

        if (generator.nextDouble() < eps){
            // Explore.
            Action randomAction = availableActions.get(generator.nextInt(availableActions.size()));
            randomAction.setQValue(output[actionIndex(randomAction)]);
            return randomAction;
        }

        // Exploit: getMaxMove also caches the Q-value on the chosen action.
        return getMaxMove(availableActions, output);
    }

    /**
     * Softmax (Boltzmann) selection: actions are drawn with probability
     * proportional to exp(Q/T). The temperature T anneals towards zero over
     * training; below T_THRESHOLD selection becomes purely greedy.
     */
    protected Action getSoftMaxMove(List<Action> availableActions, double[] output) {
        Action bestAction = null;
        // Scale progress into the annealing schedule of the source paper.
        double gamesPlayed = (totalGames - board.getGamesLeft())*15000000.0/totalGames;
        double T = Math.pow(T_BASE, gamesPlayed);

        if (T < T_THRESHOLD) {
            // Temperature effectively zero: just take the max.
            double max = -1.0 * Double.MAX_VALUE;
            for (Action a : availableActions) {
                double QValue = output[actionIndex(a)];
                if (QValue > max) {
                    max = QValue;
                    bestAction = a;
                    bestAction.setQValue(max);
                }
            }
        } else {
            // Normalization constant of the Boltzmann distribution.
            double quotient = 0.0;
            for (Action a : availableActions) {
                quotient += exp(output[actionIndex(a)]/T);
            }

            // Sample via the cumulative distribution.
            double rnd = generator.nextDouble();
            double probA = 0.0;
            boolean first = true;

            for (Action a : availableActions) {
                int actionIndex = actionIndex(a);
                probA += exp(output[actionIndex]/T)/quotient;
                if (probA >= rnd && first) {
                    bestAction = a;
                    a.setQValue(output[actionIndex]);
                    first = false;
                }
            }
        }

        return bestAction;
    }

    /**
     * Returns the action with the highest network output, caching that value
     * on the action via setQValue().
     * BUGFIX: the running maximum now starts at -Double.MAX_VALUE. The old
     * -Double.MIN_VALUE is a tiny negative number (~-4.9e-324), so whenever
     * every legal move had a negative Q-value this method returned null
     * instead of the best move.
     */
    protected Action getMaxMove(List<Action> availableActions, double[] output) {
        Action bestAction = null;
        double max = -1.0 * Double.MAX_VALUE;

        for (Action a : availableActions){
            double QValue = output[actionIndex(a)];
            if (QValue > max){
                max = QValue;
                bestAction = a;
            }
        }

        if (bestAction != null){
            bestAction.setQValue(max);
        }

        return bestAction;
    }

    /**
     * Mid-game learning step, bootstrapping from the Q-value of the newly
     * selected action.
     *
     * @param state     state in which newAction was chosen.
     * @param newAction the action just selected (null when the player passes).
     */
    protected void updateNN(State state, Action newAction) {
        if (useEligibility) {
            previousMoves.add(new Move(state, newAction));
            double lambda = 1;

            ListIterator<Move> li = previousMoves.listIterator(previousMoves.size());
            li.previous();  // Skip the move just added: it has no successor yet.

            double newQ = 0.0;
            if (newAction != null){
                newQ = newAction.getQValue();
            }

            while(li.hasPrevious()) {
                Move move = li.previous();
                Action prevAction = move.getAction();
                double prevQ = prevAction.getQValue();
                double Qtarget = DISCOUNT_RATE * newQ * lambda;
                // BUGFIX: train the pair the iterator is at; the old code
                // passed lastAction/lastState for every iteration (see the
                // same fix in endGame()).
                learn(Qtarget, prevQ, prevAction, move.getState());
                lambda *= LAMBDA_DECAY;
            }
        } else {
            double lastQ = lastAction.getQValue();
            double newQ = 0.0;
            if (newAction != null){
                newQ = newAction.getQValue();
            }
            double Qtarget = DISCOUNT_RATE * newQ;
            learn(Qtarget, lastQ, lastAction, lastState);
        }
    }

    /**
     * Back-propagates the TD error (Qtarget - lastQ) for the output unit of
     * lastAction, after a forward pass on lastState.
     */
    protected void learn(double Qtarget, double lastQ, Action lastAction, State lastState) {
        double error = Qtarget - lastQ;
        nn.forwardPropagate(lastState.toDoubles());
        nn.backPropagate(error, nnLR, actionIndex(lastAction));
    }

    /**
     * Weighted board evaluation: sum over all squares of weight * occupancy
     * (+1 own piece, -1 opponent, 0 empty).
     * BUGFIX: now reads the supplied table; the old code ignored the values
     * parameter and always used positionValues, so the heuristic and
     * benchmark tables were never actually applied.
     */
    protected int evalWeighted(IBoard brd, int[][] values) {
        int eval = 0;
        for (int r = 0; r < brd.getRowCount(); r++) {
            for (int c = 0; c < brd.getColCount(); c++) {
                eval += values[r][c] * positionOccupance(brd,r,c);
            }
        }
        return eval;
    }

    /** Endgame evaluation: raw disc difference (own minus opponent). */
    protected int evalEndgame(IBoard brd) {
        int eval = 0;
        for (int r = 0; r < brd.getRowCount(); r++) {
            for (int c = 0; c < brd.getColCount(); c++) {
                eval += positionOccupance(brd,r,c);
            }
        }
        return eval;
    }

    /**
     * @return +1 when this player occupies (r,c), -1 for the opponent, and
     *         0 for an empty square.
     */
    protected int positionOccupance(IBoard brd, int r, int c) {
        Color color = brd.playerAt(r,c);
        if (color == null){
            return 0;
        }
        return color.equals(this.getColor()) ? 1 : -1;
    }

    /** @return percentage [0..100] of board squares holding any piece. */
    protected double percentOccupied(IBoard brd){
        double count = 0.0;
        for (int r = 0; r < brd.getRowCount(); r++) {
            for (int c = 0; c < brd.getColCount(); c++) {
                // |occupancy| is 1 for any occupied square, 0 otherwise.
                count += Math.abs(positionOccupance(brd,r,c));
            }
        }
        // Generalized from the hard-coded /64 to the actual board size.
        return (count / (brd.getRowCount() * brd.getColCount())) * 100d;
    }

    /**
     * The endgame starts once all four corners are occupied or at least 80%
     * of the board is filled. Corner indices are derived from the board's
     * dimensions instead of the hard-coded 0/7 (identical on 8x8 boards).
     */
    protected boolean checkEndGame(IBoard brd) {
        int lastR = brd.getRowCount() - 1;
        int lastC = brd.getColCount() - 1;
        int cornersCovered = Math.abs(positionOccupance(brd, 0, 0))
                           + Math.abs(positionOccupance(brd, 0, lastC))
                           + Math.abs(positionOccupance(brd, lastR, 0))
                           + Math.abs(positionOccupance(brd, lastR, lastC));

        return (cornersCovered == 4) || (percentOccupied(brd) >= 80.0);
    }

    /** Subclasses define when actions are selected and updates triggered. */
    @Override
    public abstract void beginTurn();

    @Override
    public abstract void endTurn();
}
