from time import clock
from math import exp
from loa_game import DIRECTIONS, WHITE, BLACK, EMPTY
# Effectively positive infinity: the literal 1.0e400 overflows to float('inf').
INFINITY = 1.0e400

class TimeException(Exception):
    '''
    Raised when the per-turn time budget is exhausted.

    Carries the best value known at the moment of the timeout so the
    caller can decide which action to fall back on.
    '''

    def __init__(self, msg_, value_):
        self.msg = msg_
        self.value = value_

    def __str__(self):
        # Same rendering as "<msg>, <value>".
        return "%s, %s" % (self.msg, self.value)

class AnyTimeAlphaBetaSearch_Extended:
    '''
    This search algorithm implements the limited-resource minimax tree search 
    algorithm with alpha-beta pruning for zero-sum games. This version cuts off 
    search by depth alone and uses an evaluation function (utility).
    '''
    
    def __init__(self, player, max_depth, utility, turn_time_limit, game_state):
        '''
        Constructor.
        
        @param player: Your player. This search algorithm will choose the best
                       actions for this player.
        @param max_depth: The depth of the search tree.
        @param utility: An evaluation function for states.
        '''
        self.player = player
        self.max_depth = max_depth
        self.utility = utility
        
        self.timeBuffer = 0.03
        self.turnTime = turn_time_limit
        self.bestValue = -INFINITY
        self.prevBlack = game_state.blacks
        self.prevWhite = game_state.whites
        self.utilityKnowledge = {}
        self.evaluatorKnowledge = {}
        for i in xrange(1,self.prevBlack+1):
            for j in xrange(1,self.prevWhite+1):
                self.utilityKnowledge[(i,j)] = {}
                self.evaluatorKnowledge[(i,j)] = {}
    
    def clean(self, blacks, whites):
        '''
        Cleans the database from irrelevant knowledge.
        
        @param blacks: The number of black pieces in the current state.
        @param whites: The number of white pieces in the current state.
        '''
        if blacks < self.prevBlack:
            for j in xrange (1,self.prevWhite):
                self.utilityKnowledge[(self.prevBlack,j)].clear()
                self.evaluatorKnowledge[(self.prevBlack,j)].clear()
        if whites < self.prevWhite:
            for i in xrange (1,self.prevBlack):
                self.utilityKnowledge[(i,self.prevWhite)].clear()
                self.evaluatorKnowledge[(i,self.prevWhite)].clear()
        self.prevBlack = blacks
        self.prevWhite = whites
    
    def search(self, current_state):
        '''
        Search game to determine best action; use alpha-beta pruning.
        
        @param current_state: The current game state to start the search from.
        '''
        self.depthLimit = self.max_depth
        self.endTime = clock() + self.turnTime - self.timeBuffer
        successors = current_state.getSuccessors().items()
        try:
            f = lambda state: self.evaluator(state[1])
            successors = sorted(successors,key=f)
            self.bestAction = successors[0][0]
            self.bestValue = self.utility(successors[0][1], self.endTime)
        except TimeException, e:
            return successors[0][0]            
        self.clean(current_state.blacks,current_state.whites)
        while True:
            for action, state in successors:
                self.currentAction = action
                value_fn = self._getValueFn(state)
                try:
                    value = value_fn(state, self.bestValue, INFINITY, 1)
                except TimeException, e:
#                    print e,self.usage
                    if e.value > self.bestValue:
                        return self.currentAction
                    return self.bestAction
                except Exception:
                    print "oops"
                if value > self.bestValue:
                    self.bestValue = value
                    self.bestAction = action
            self.depthLimit += 1
            if self.endTime-clock() < 0:
                return self.bestAction
        return self.bestAction
    
    def _getValueFn(self, state):
        if state.getCurrentPlayer() == self.player:
            return self._maxValue
        else:
            return self._minValue
    
    def _cutoffTest(self, state, depth):
        return depth >= self.depthLimit or (state.getWinner() is not None)
    
    def _maxValue(self, state, alpha, beta, depth):
        try:
            utility = self.utilityKnowledge[(state.blacks,state.whites)][hash(state)]
        except:
            self.checkTime(self.bestValue)
            utility = self.utility(state, self.endTime)
            self.utilityKnowledge[(state.blacks,state.whites)][hash(state)] = utility

        if self._cutoffTest(state, depth):
            return utility
        if utility > self.bestValue:
            self.bestValue = utility
            self.bestAction = self.currentAction
        
        value = utility
        self.checkTime(value)
        # reorder the successors.
        successors = state.getSuccessors().items()
        f = lambda state: self.evaluator(state[1])
        successors = sorted(successors,key=f)
        for action,successor in successors:
            value_fn = self._getValueFn(successor)
            value = max(value, value_fn(successor, alpha, beta, depth + 1))
            if value >= beta:
                return value
            alpha = max(alpha, value)
            self.checkTime(value)
        return value
    
    def _minValue(self, state, alpha, beta, depth):
        try:
            utility = self.utilityKnowledge[(state.blacks,state.whites)][hash(state)]
        except:
            self.checkTime(self.bestValue)
            utility = self.utility(state, self.endTime)
            self.utilityKnowledge[(state.blacks,state.whites)][hash(state)] = utility

        if self._cutoffTest(state, depth):
            return utility
        if utility > self.bestValue:
            self.bestValue = utility
        
        value = utility
        self.checkTime(value)
        # reorder the successors.
        successors = state.getSuccessors().items()
        f = lambda state: -self.evaluator(state[1])
        successors = sorted(successors,key=f)
        for action,successor in successors:
            value_fn = self._getValueFn(successor)
            value = min(value, value_fn(successor, alpha, beta, depth + 1))
            if value <= alpha:
                return value
            beta = min(beta, value)
            self.checkTime(value)
        return value
    
    def checkTime(self,value):
        if self.endTime-clock() < 0:
            raise TimeException("Timeout",value)

    def evaluator(self, state):
        try:
            '''If we already calculated the evaluation function for this state, fetch it.'''
            retVal =  self.evaluatorKnowledge[(state.blacks,state.whites)][hash(state)]
            state.winner = retVal[1]
        except:
            '''Calculate the evaluation function for the given state.'''
            myList, opList = [],[]
            myScc, opScc = 0,0
            for i in xrange(0, state.size):
                for j in xrange(0,state.size):
                    if state.board[i][j] == self.player:
                        if not((i,j) in myList):
                            self.checkTime(self.bestValue)
                            myList += self._getConnectiveComponent(state, i, j, self.player)
                            self.checkTime(self.bestValue)
                            myScc += 1
                    elif state.board[i][j] != EMPTY:
                        if not((i,j) in opList):
                            self.checkTime(self.bestValue)
                            opList += self._getConnectiveComponent(state, i, j, state.board[i][j])
                            self.checkTime(self.bestValue)
                            opScc += 1
            score = opScc/(myScc**2)
            if 1 == myScc and 1 != opScc: retVal = (score, INFINITY)
            elif 1 != myScc and 1 == opScc: retVal = (score, -INFINITY)
            else: retVal = (score, 0) 
#            retVal = exp(-myScc)
            self.evaluatorKnowledge[(state.blacks,state.whites)][hash(state)] = retVal
            state.winner = retVal[1]            
        return retVal[0]
        
    def _getConnectiveComponent(self, state, initial_row, initial_col, player):
        '''
        Performs BFS to traverse the connective component, beginning at the given coordinate.
        
        @param initial_row: The initial row coordinate.
        @param initial_col: The initial column coordinate.
        @param player: The player who's connective component we would like to traverse.
        @return: The closed set - the connective component.
        '''
        open = set()
        open.add((initial_row, initial_col))
        closed = set()
        
        while len(open) > 0:
            self.checkTime(self.bestValue)
            (row, col) = open.pop()
            closed.add((row, col))
            for direction in DIRECTIONS:
                (neighbor_row, neighbor_col) = direction.move((row, col))
                if (0 <= neighbor_row < state.size) and (0 <= neighbor_col < state.size) \
                    and (state.board[neighbor_row][neighbor_col] == player) \
                    and ((neighbor_row, neighbor_col) not in closed):
                    open.add((neighbor_row, neighbor_col))
        return closed