from __future__ import division
from pdb import set_trace

from collections import defaultdict
from functools import partial
from time import clock

from functools32 import lru_cache

from GameAgent import GameAgent
from BlokusGameAction import BlokusGameAction
from BlokusGameConstants import TIE, NO_LIMIT
from BlokusGameAgentExampleX import BlokusGameAgentExample
from BlokusGameUtils import getIndexesOfTurnedOnBits
from BlokusGameState import BlokusGameState

#set to False to disable the time-based cutoff of the alpha-beta search (and correspondingly remove the iterative deepening from the move method)
CHECK_TIME = True

WIN_VALUE = float('inf')
TIE_VALUE = 0
LOSE_VALUE = -WIN_VALUE

class BlokusGameAgentFTW(BlokusGameAgentExample):
    """Alpha-beta Blokus agent with a center-weighted board heuristic.

    The heuristic scores both the squares a player already occupies and
    (optionally) the squares still legally available to him, weighting
    each square by the inverse of its Manhattan distance from the board
    center, so central squares are worth more.
    """

    def __init__(self, isCalcFreeSquars = True):
        """
        isCalcFreeSquars -- when True, the heuristic also scores the
            squares a player may still play on (slower but stronger).
        """
        BlokusGameAgentExample.__init__(self)
        # Caches for an incremental board-scoring scheme; not used by the
        # active heuristic, kept for subclasses/experiments that rely on them.
        self.prevBoards = defaultdict(partial(int, 0))
        self.prevBoardsScores = defaultdict(partial(int, 0))
        self.isCalcFreeSquars = isCalcFreeSquars
        # Consumed by BlokusGameAgentExample.move() to order successors
        # (better move ordering improves alpha-beta pruning).
        self.isAlphaBetaSortSuccessors = True

    def move(self, state):
        """Delegate the search itself to the example agent's move()."""
        return BlokusGameAgentExample.move(self, state)

    def _hueristicSquareWeight(self, state, x, y):
        """Return the weight of square (x, y): the inverse of its
        Manhattan distance from the board center.
        """
        # True division is active (from __future__ import division), so
        # `center` may be a half-integer on even-sized boards.
        center = (state.boardSizeWithBorder - 1) / 2
        distance = abs(center - x) + abs(center - y)
        # On odd-sized boards the exact center square has distance 0;
        # give it a weight above every other square (the next-best
        # weight is 1.0) instead of raising ZeroDivisionError.
        if distance == 0:
            return 2.0
        return 1 / distance

    def _heuristicAvailableSquaresToPlayBoard(self, state, myself, opponent):
        '''
        Bitboard of all squares where `myself` may legally place a piece
        with any of his colors.

        Adapted from BlokusGameState.getSuccessors().
        '''
        # The opponent's combined board does not depend on the current
        # color -- hoist it out of the loop.
        opponentAllBoards = 0
        for color in opponent.colors:
            opponentAllBoards |= color.boardBinary

        total_availableSquaresToPlay = 0
        for currentColor in myself.colors:
            currentBoard = currentColor.boardBinary
            # Squares orthogonally adjacent to the current color are illegal.
            currentBoardNeighbours = ((currentBoard >> 1) | (currentBoard << 1)
                                      | (currentBoard >> state.boardSizeWithBorder)
                                      | (currentBoard << state.boardSizeWithBorder))
            # Every other piece on the board (our other colors and all of
            # the opponent's colors) blocks placement.
            blockedBoards = opponentAllBoards
            for color in myself.colors:
                if not (color == currentColor):
                    blockedBoards |= color.boardBinary
            freeSquares = ~(currentBoard | currentBoardNeighbours | blockedBoards | state.border)

            # A color's first move is allowed only in the board's corners;
            # afterwards only diagonally adjacent to that color's pieces.
            if currentBoard:
                availableSquaresToPlay = BlokusGameState._BlokusGameState__getBoardDiagonalsToCorners(state, currentBoard)
            else:
                availableSquaresToPlay = state.boardCorners

            total_availableSquaresToPlay |= availableSquaresToPlay & freeSquares

        return total_availableSquaresToPlay

    def _heuristicPlayerScore(self, state, myself, opponent):
        """Score for `myself`: the weighted sum of his occupied squares
        plus half the weighted sum of the squares still available to him
        (the latter only when self.isCalcFreeSquars is True).
        """
        myBoard = reduce(lambda board, color: board | color.boardBinary,
                         myself.colors,
                         0)
        width = state.boardSizeWithBorder
        takenSquearsScore = sum((self._hueristicSquareWeight(state,
                                                             index % width,
                                                             index // width)
                                 for index in getIndexesOfTurnedOnBits(myBoard)), 0)

        # Only pay for the available-squares bitboard when it is scored.
        if self.isCalcFreeSquars:
            availableSquaresToPlayBoard = self._heuristicAvailableSquaresToPlayBoard(state, myself, opponent)
            availableSquaresToPlayScore = sum((self._hueristicSquareWeight(state,
                                                                           index % width,
                                                                           index // width)
                                               for index in getIndexesOfTurnedOnBits(availableSquaresToPlayBoard)), 0)
        else:
            availableSquaresToPlayScore = 0

        return takenSquearsScore + 0.5 * availableSquaresToPlayScore

    def _heuristicScore(self, state):
        """Heuristic value of `state` from the current player's point of view."""
        return (self._heuristicPlayerScore(state, state.currentPlayer, state.opponentPlayer)
                - self._heuristicPlayerScore(state, state.opponentPlayer, state.currentPlayer))

    def utility(self, state):
        """Value of `state` from self.player's point of view:
        WIN_VALUE for a win, LOSE_VALUE for a loss, TIE_VALUE for a tie,
        otherwise the heuristic score (negated when it is evaluated from
        the opponent's turn).
        """
        winner = state.getWinner()

        if winner is None:
            score = self._heuristicScore(state)
            return score if self.player == state.getCurrentPlayer() else -score
        if winner == self.player:
            return WIN_VALUE
        if winner == TIE:
            return TIE_VALUE
        return LOSE_VALUE