from time import clock
from loa_game import Direction

# Effectively infinity. float('inf') is explicit; the old literal 1.0e400
# produced the same IEEE infinity only via silent float-literal overflow.
INFINITY = float('inf')
DEBUG = False

class ExtendedAlphaBetaSearch:
    '''
    This search algorithm implements the limited-resource minimax tree search 
    algorithm with alpha-beta pruning for zero-sum games. This version cuts off 
    search by depth alone and uses an evaluation function (utility).
    '''
    
    def __init__(self, player, max_depth, utility, turn_time_limit, game_state):
        '''
        Constructor.
        
        @param player: Your player. This search algorithm will choose the best
                       actions for this player.
        @param max_depth: The depth of the search tree.
        @param utility: An evaluation function for states.
        @param turn_time_limit: The time given per turn.
        @param game_state: The initial state of the game.
        '''
        self.player = player
        self.max_depth = max_depth
        self.utility = utility
        
        self.best_action = None
        self.topValue = None
        self.timeLimit = turn_time_limit
        self.endTime = None
        self.currentAction = None
        self.prevBlack = game_state.blacks
        self.prevWhite = game_state.whites
        self.minKnowledge={}
        self.maxKnowledge={}
        self.pieces=[]
        for i in xrange(1,game_state.blacks+1):
            for j in xrange(1,game_state.whites+1):
                self.minKnowledge[(i,j)]={}
                self.maxKnowledge[(i,j)]={}
                #if game_state.board[i][j] == self.player:
                #    self.pieces.append((i,j))
        
        self.buffer = 0.02
        self.minF=lambda x : -self.evaluator(x[1])
        self.maxF=lambda x : self.evaluator(x[1])
        self.moves = game_state.size
        
    def checkTime(self):
        '''
        Throws an exception if timeout occurs. 
        '''
        if self.endTime-clock() <= 0:
            raise Exception("Timeout")

    def clean(self, blacks, whites):
        '''
        Cleans the database from irrelevant knowledge.
        
        @param blacks: The number of black pieces in the current state.
        @param whites: The number of white pieces in the current state.
        '''
        if blacks < self.prevBlack:
            for j in xrange (1,self.prevWhite):
                self.minKnowledge[(self.prevBlack,j)].clear()
                self.maxKnowledge[(self.prevBlack,j)].clear()
        if whites < self.prevWhite:
            for i in xrange (1,self.prevBlack):
                self.minKnowledge[(i,self.prevWhite)].clear()
                self.maxKnowledge[(i,self.prevWhite)].clear()
        self.prevBlack = blacks
        self.prevWhite = whites
    
    def rallyTroops(self, state):
        '''
        If we ever figure out how to iterate our pieces with consideration to 
        spin actions, 
        '''        
        if self.player == 'W':
            if state.whites == len(self.pieces):
                pass
        else:
            if state.blacks == len(self.pieces):
                pass
        return

    def search(self, current_state, startTime):
        '''
        Search game to determine best action; use alpha-beta pruning.
        
        @param current_state: The current game state to start the search from.
        @param startTime: Start time. Duh.
        '''
        self.limit_depth = self.max_depth
        self.endTime = startTime + self.timeLimit - self.buffer
        best_value = -INFINITY
        successors = sorted(current_state.getSuccessors().items(),key=self.maxF) # get the successors list, sorted by the evaluator 
        # save the initial action as best, in case we don't have time to find another.
        firstMove = successors[0]
        self.best_action = firstMove[0]
        self.evaluator(firstMove[1])
        self.topValue = self.utility(firstMove[1])

        self.clean(current_state.blacks,current_state.whites)
        while True:
            for action,state in successors:
                value_fn = self._getValueFn(state)
                self.currentAction = action
                try:
                    value = value_fn(state, best_value, INFINITY, 1)
                except Exception, e:
                    print e
                    return self.best_action
                if value > best_value:
                    best_value = value
                    self.best_action = action
            self.limit_depth += 1
            if self.endTime-clock() <= 0:
                return self.best_action
        return self.best_action
    
    def _getValueFn(self, state):
        if state.getCurrentPlayer() == self.player:
            return self._maxValue
        else:
            return self._minValue
    
    def _cutoffTest(self, state, depth):
        return depth >= self.limit_depth or (state.getWinner() is not None)
    
    def _maxValue(self, state, alpha, beta, depth):
        if DEBUG: print depth, self.limit_depth
#        try:
#            utility = self.maxKnowledge[(self.prevBlack,self.prevWhite)][hash(state)]
#        except KeyError:
#            utility = self.utility(state)
        utility = self.utility(state)
            
        if self._cutoffTest(state, depth):
            return utility
        # Here, we learn
        self.maxKnowledge[(self.prevBlack,self.prevWhite)][hash(state)] = utility        
        if utility > self.topValue:
            self.best_action = self.currentAction
        self.checkTime()
        value = -INFINITY
        successors = sorted(state.getSuccessors().items(),key=self.maxF)
        for action,successor in successors:
            self.checkTime()
            value_fn = self._getValueFn(successor)
            value = max(value, value_fn(successor, alpha, beta, depth + 1))
            self.checkTime()
            if value >= beta:
                return value
            alpha = max(alpha, value)
            self.checkTime()
        return value
    
    def _minValue(self, state, alpha, beta, depth):
        if DEBUG: print depth, self.limit_depth
#        try:
#            utility = self.minKnowledge[(self.prevBlack,self.prevWhite)][hash(state)]
#        except KeyError:
#            utility = self.utility(state)
        utility = self.utility(state)
        
        if self._cutoffTest(state, depth):
            return utility
        # Here, we learn
        self.minKnowledge[(self.prevBlack,self.prevWhite)][hash(state)] = utility
        if  utility > self.topValue:
            self.best_action = self.currentAction
        self.checkTime()
        value = INFINITY
        successors = sorted(state.getSuccessors().items(),key=self.minF)
        for action,successor in successors:
            self.checkTime()
            value_fn = self._getValueFn(successor)
            value = min(value, value_fn(successor, alpha, beta, depth + 1))
            self.checkTime()
            if value <= alpha:
                return value
            beta = min(beta, value)
            self.checkTime()
        return value
    
    def evaluator(self,state):
        '''
        Evaluation function for node reorder.
        
        @param state: the state to be evaluated.
        '''
        if state.getCurrentPlayer() == self.player:
            if not hasattr(state, "maxEvaluator"):
                state.maxEvaluator = None
            elif not (state.maxEvaluator is None):
                return state.maxEvaluator
        else:
            if not hasattr(state, "minEvaluator"):
                state.minEvaluator = None
            elif not (state.minEvaluator is None):
                return state.minEvaluator
        self.moves -= 1
        if self.moves <= 0: 
            myMinX = state.size
            myMaxX = 0
            myMinY = state.size
            myMaxY = 0
            opMinX = state.size
            opMaxX = 0
            opMinY = state.size
            opMaxY = 0
            i = 0
            consistencyReward = 0
            self.checkTime()
            while i < state.size:
                maxConnections = 0
                j = 0
                while j < state.size:
                    if state.board[i][j] == self.player:
                        myConections = 0
                        for k in xrange(i-1, i+1):
                            for l in xrange (j-1, j+1):
                                if (k,l) >= (0,0) and (k,l) < (state.size,state.size) and (k,l) != (i,j):
                                    myConections += (state.board[k][l] == self.player)
                        if myConections > maxConnections:
                            state.candidate = (i,j)
                            maxConnections = myConections 
                        if 0 == myConections:
                            consistencyReward -= 1
                        consistencyReward += myConections
                            
                        if j < myMinX: myMinX = j
                        elif j > myMaxX: myMaxX = j
                        if i < myMinY: myMinY = i
                        elif i > myMaxY: myMaxY = i
                    else:
                        if j < opMinX: opMinX = j
                        elif j > opMaxX: opMaxX = j
                        if i < opMinY: opMinY = i
                        elif i > opMaxY: opMaxY = i
                    j += 1
                i += 1
            #endwhile
            #myVal = 1/((myMaxX-myMinX+1)*(myMaxY-myMinY+1))
            #opVal = 1/((opMaxX-opMinX+1)*(opMaxY-opMinY+1))
            penalty = (state.board[0][0] == self.player)+\
                        (state.board[state.size-1][0] == self.player)+\
                        (state.board[0][state.size-1] == self.player)+\
                        (state.board[state.size-1][state.size-1] == self.player)
            cornerTax = state.size^2
            constWeight = cornerTax*2
            rangeTax = 10.0
            size = ((myMaxX-myMinX+1)*(myMaxY-myMinY+1))
            if 0 == size: size = 1
            myVal = rangeTax/size - cornerTax*penalty + constWeight*consistencyReward
            opVal = (opMaxX-opMinX+1)*(opMaxY-opMinY+1)
        else:
            myMinX = 0
            myMaxX = 0
            myMinY = 0
            myMaxY = 0            
            rangeTax = 0
            penalty = 0
            consistencyReward = 0
            size = 0
            myVal = 1
            opVal = 1
#        f=open('evaluator_file', 'w')
#        f.write(str(rangeTax/((myMaxX-myMinX+1)*(myMaxY-myMinY+1)))+"\t"+str(penalty)+"\t"+str(consistencyReward))
#        f.close()
        #print size, penalty, consistencyReward, myVal
        if state.getCurrentPlayer() == self.player:
            state.maxEvaluator = myVal
        else:
            state.minEvaluator = myVal
        return myVal
        return opVal

# TODO: maintain dict of our pieces in order to improve speed of iteration - also requires to save last action