package ee.ut.aa.neuraltic.model;

import java.util.Arrays;
import java.util.List;

import org.apache.log4j.Logger;

import ee.ut.aa.neuraltic.logic.TicEvaluator;
import ee.ut.aa.neuraltic.neural.Network;

/**
 * A {@link Player} that chooses its moves with a fixed-depth negamax search
 * whose leaf positions are scored by a neural {@link Network}.
 *
 * The network is assumed to evaluate positions from PLAYER_ONE's point of
 * view, so boards are negated (both marks swapped) whenever the opponent's
 * perspective is needed.
 */
public class NeuralPlayer implements Player {

	private static final Logger log = Logger.getLogger( NeuralPlayer.class );

	/** Maximum look-ahead depth (in plies below the root) of the search. */
	public static final int DEPTH = 2;

	private int value;
	private final Network network;

	private final TicEvaluator evaluator;

	/**
	 * Creates a neural-network-backed player.
	 *
	 * @param value   this player's mark, Board.PLAYER_ONE or Board.PLAYER_TWO
	 * @param network network used to evaluate leaf positions
	 */
	public NeuralPlayer( int value, Network network ) {

		this.value = value;
		this.network = network;
		this.evaluator = TicEvaluator.getInstance();
	}

	/**
	 * Selects the best legal move via negamax and applies it to the board.
	 *
	 * @param board current game state; mutated by the chosen move
	 * @return the same board instance, after the move has been made
	 */
	@Override
	public Board makeNextMove( Board board ) {

		log.debug( "Requesting next move for player " + value );

		Board searchBoard;

		if( value == Board.PLAYER_TWO ) {

			// The evaluator works from PLAYER_ONE's perspective; negate the
			// board so this player's marks become PLAYER_ONE's before searching.
			log.debug( "Reversing board fields" );

			searchBoard = new TicBoard( negated( board.getFields() ), Board.PLAYER_ONE );

		} else {

			searchBoard = board;
		}

		int bestMove = miniMax( searchBoard );

		log.debug( "Next move is: " + bestMove );

		board.makeMove( bestMove );

		if( log.isDebugEnabled() )
			log.debug( "New board state is: " + Arrays.toString( board.getFields() ) );

		return board;
	}

	/**
	 * Search root: returns the legal move with the highest negamax value
	 * for the player to move on the given board.
	 */
	private int miniMax( Board board ) {

		log.debug( "Entering minimax" );

		List<Integer> legalMoves = board.getLegalMoves();

		int bestMove = legalMoves.get( 0 );
		double bestValue = Double.NEGATIVE_INFINITY;

		for( Integer legalMove : legalMoves ) {

			Board child = childAfterMove( board, legalMove );

			// Negamax: a child's value for us is the negation of its value
			// for the opponent, who is to move in the child position.
			double childValue = -miniMaxRec( child, 0 );

			if( log.isDebugEnabled() )
				log.debug( "childValue=" + String.format( "%.5f", childValue ) + " for move=" + legalMove
						+ " on board=" + Arrays.toString( child.getFields() ) );

			if( childValue > bestValue ) {

				if( log.isDebugEnabled() )
					log.debug( "Found a better value childvalue=" + String.format( "%.5f", childValue )
							+ " bestValue=" + String.format( "%.5f", bestValue ) );

				bestMove = legalMove;
				bestValue = childValue;
			}
		}

		return bestMove;
	}

	/**
	 * Recursive negamax step. Recursion stops at {@link #DEPTH} plies or
	 * when the game is over; leaves are scored by {@link #evaluateBoard}.
	 *
	 * @param board position to evaluate
	 * @param depth current ply below the root, starting at 0
	 * @return best achievable value for the player to move at this node
	 */
	private double miniMaxRec( Board board, int depth ) {

		log.debug( "Entering recursive minimax, depth=" + depth );

		if( depth >= DEPTH || evaluator.isGameOver( board ) )
			return evaluateBoard( board, depth );

		log.debug( "Game not over yet, proceeding..." );

		double bestValue = Double.NEGATIVE_INFINITY;

		for( Integer legalMove : board.getLegalMoves() ) {

			double childValue = -miniMaxRec( childAfterMove( board, legalMove ), depth + 1 );

			bestValue = Math.max( childValue, bestValue );

			log.debug( "Depth=" + depth + ", best value so far = " + bestValue );
		}

		return bestValue;
	}

	/**
	 * Returns a copy of the board with the given move applied; the original
	 * board is left untouched.
	 */
	private Board childAfterMove( Board board, int move ) {

		Board child = new TicBoard( board.getFields().clone(), board.getNextPlayer() );
		child.makeMove( move );
		return child;
	}

	/**
	 * Scores a position with the neural network. Positions reached at an
	 * even depth belong to the opponent, so their fields are negated first
	 * to keep the evaluation in this player's perspective.
	 *
	 * @param board leaf position to score
	 * @param depth ply at which the position was reached
	 * @return the network's first output neuron value for the position
	 */
	private double evaluateBoard( Board board, int depth ) {

		int[] fields = board.getFields();

		if( log.isDebugEnabled() )
			log.debug( "Evaluating board, original fields=" + Arrays.toString( fields ) );

		if( depth % 2 == 0 ) {

			// Even depth = opponent's node: flip the marks before evaluating.
			fields = negated( fields );

			if( log.isDebugEnabled() )
				log.debug( "Evaluating board as for opponent, reversed fields=" + Arrays.toString( fields ) );
		}

		double[] networkInput = new double[network.getInput().getNeurons().size()];

		generateInput( networkInput, fields );

		network.getInput().feedInput( networkInput );
		network.getInput().initFeedForward();

		double[] networkOutput = network.getOutput().retreiveOutput();

		network.resetNeurons();

		if( log.isDebugEnabled() )
			log.debug( "Evaluated board (" + ( depth % 2 == 1 ? "self" : "oppo" ) + ") to output="
					+ networkOutput[0] );

		return networkOutput[0];
	}

	/**
	 * Returns a new array with every field negated, i.e. with the two
	 * players' marks swapped. The input array is not modified.
	 */
	private static int[] negated( int[] fields ) {

		int[] reversed = new int[fields.length];

		for( int i = 0; i < fields.length; i++ )
			reversed[i] = -fields[i];

		return reversed;
	}

	/**
	 * Encodes a board into the network's input vector: two inputs per cell,
	 * (1,-1) for PLAYER_ONE, (-1,1) for PLAYER_TWO, (-1,-1) for empty.
	 *
	 * @param ni network input vector to fill (length must be 2 * b.length)
	 * @param b  board fields, indexed row-major:
	 *           0 | 1 | 2
	 *           ---------
	 *           3 | 4 | 5
	 *           ---------
	 *           6 | 7 | 8
	 */
	private void generateInput( double[] ni, int[] b ) {

		for( int i = 0; i < b.length; i++ ) {
			if( b[i] == Board.PLAYER_ONE ) {
				ni[i * 2] = 1;
				ni[i * 2 + 1] = -1;
			} else if( b[i] == Board.PLAYER_TWO ) {
				ni[i * 2] = -1;
				ni[i * 2 + 1] = 1;
			} else {
				ni[i * 2] = -1;
				ni[i * 2 + 1] = -1;
			}
		}
	}

	/** @return this player's mark value */
	@Override
	public int getValue() {

		return value;
	}

	/** Sets this player's mark value. */
	@Override
	public void setValue( int value ) {
		this.value = value;
	}
}
