from Quoridor_game import QuoridorAction, WHITE, BLACK, TIE, QuoridorState
from alpha_beta_enhanced import AlphaBetaEnhancedSearch
from game_agent import GameAgent
from game_runner import GameRunner
from time import clock
from datetime import datetime
from astar_agent import Astar_Agent
from astar_problem import Astar_State, Astar_Action

PAUSES = False        # when True, move() blocks on keyboard input after each move (debug aid)
PRINT_TRACE = False   # verbose-trace flag (only referenced from commented-out prints below)
WIN_UTIL = 10000      # base utility magnitude assigned to a decided (won/lost) game

ASTAR_W = 0.8         # weight handed to the weighted-A* solver in manhathen_distance
SAFETY_MARGIN = 0.04  # seconds shaved off the per-turn limit so the search returns in time


class SelectiveAlphaBetaAgent(GameAgent):
    """Quoridor agent choosing moves with a time-limited selective alpha-beta search.

    The search is guided by a composite heuristic utility that blends:
    weighted-A* path-length advantage, raw row-distance difference,
    pieces already at their goal, remaining-wall advantage, whose turn
    it is, and a small "stay central" motivation term.  Every term is
    expressed from self.player's point of view (positive = good for us).
    """

    def move(self, game_state):
        """Return the action picked by the alpha-beta search for game_state."""
        action = self.alphaBeta.search(game_state)
        if PAUSES:
            raw_input('enter to continue...')
        return action

    def setup(self, player, game_state, turn_time_limit, setup_time_limit):
        """One-time initialisation before the game starts.

        player           -- WHITE or BLACK, the side this agent plays.
        game_state       -- initial QuoridorState (board size, piece count,
                            wall counts).
        turn_time_limit  -- seconds allowed per move; SAFETY_MARGIN is
                            subtracted so the search returns before the bell.
        setup_time_limit -- seconds allowed for setup (unused here).
        """
        self.player = player
        # Depth 1000 is effectively unbounded: the search is cut off by the
        # time budget, not by depth.  self.utility is passed directly
        # (the original wrapped it in an equivalent lambda).
        self.alphaBeta = AlphaBetaEnhancedSearch(
            self.player, 1000, self.utility,
            turn_time_limit - SAFETY_MARGIN, False, 0.3, 1)
        # "Go back" moves step away from our goal: north for WHITE,
        # south for BLACK.
        back_dir = 'N' if self.player == WHITE else 'S'
        self.alphaBeta.GO_BACK_ACTIONS = [
            QuoridorAction('M', [pieceNdx, back_dir])
            for pieceNdx in range(game_state.numPieces)]
        # Scale the wall-advantage term by how scarce walls are at game
        # start.  max(..., 1) guards a zero-wall variant against
        # ZeroDivisionError (the original would crash there).
        self.walls_coeff = 200. * game_state.brdSize / max(game_state.WallsLeftW, 1)
        self.goalreach_coeff = 200 * game_state.brdSize

    def utility(self, q_state, manhatten_targetsW, manhatten_targetsB, end_time):
        """Heuristic value of q_state from self.player's point of view.

        manhatten_targetsW/B -- per-piece A* target squares for each side.
        end_time             -- clock() deadline; A* work stops past it.

        Terminal states return +/-WIN_UTIL adjusted by turns left so the
        agent prefers quick wins and slow losses; ties are worth 0.
        """
        winner = q_state.getWinner()
        if winner is None:
            d_diff = self.distances_dif(q_state)
            walls_diff = self.walls_left_dif(q_state)
            move_motivation = self.move_motivate(q_state)
            playerturn_cost = self.playerturnUtil(q_state)
            token_at_goal = self.tokens_at_goal(q_state)
            manhatten_dist = self.manhathen_distance(
                q_state, manhatten_targetsW, manhatten_targetsB, end_time)
            # Weighted linear blend of all heuristic terms.
            return (manhatten_dist * 100
                    + d_diff * 150
                    + token_at_goal * self.goalreach_coeff
                    + walls_diff * self.walls_coeff
                    + playerturn_cost * 100
                    + move_motivation * 0.1)
        if winner == self.player:
            # Winning sooner is better: add the remaining turn count.
            return WIN_UTIL + q_state.turnsLeft
        if winner == TIE:
            return 0
        # Losing later is better: subtract the remaining turn count.
        return -WIN_UTIL - q_state.turnsLeft

    def manhathen_distance(self, q_state, manhatten_targetsW, manhatten_targetsB, end_time):
        """A*-estimated path-length advantage over the opponent.

        For every surviving piece, run a weighted A* (weight ASTAR_W)
        towards its assigned target; each backwards step ('N' for WHITE,
        'S' for BLACK) in the found path is penalised by 2 extra.  An
        unreachable target costs a full board, brdSize**2.  Returns 0
        immediately once the clock() deadline has passed.
        """
        lenW = 0
        lenB = 0
        agent = Astar_Agent()
        bad_actW = Astar_Action('N')  # a step away from WHITE's goal
        bad_actB = Astar_Action('S')  # a step away from BLACK's goal
        for i in range(q_state.numPieces):
            locW = q_state.locationsW[i]
            if locW is not None:
                problemW = Astar_State(manhatten_targetsW[i], locW, q_state, WHITE)
                if clock() >= end_time:
                    return 0
                solutionW = agent.solve(problemW, ASTAR_W)
                if solutionW is not None:
                    lenW += len(solutionW) + solutionW.count(bad_actW) * 2
                else:
                    lenW += q_state.brdSize ** 2
            locB = q_state.locationsB[i]
            if locB is not None:
                problemB = Astar_State(manhatten_targetsB[i], locB, q_state, BLACK)
                if clock() >= end_time:
                    return 0
                solutionB = agent.solve(problemB, ASTAR_W)
                if solutionB is not None:
                    lenB += len(solutionB) + solutionB.count(bad_actB) * 2
                else:
                    lenB += q_state.brdSize ** 2
        cost = lenB - lenW  # positive when WHITE's paths are shorter
        return cost if self.player == WHITE else -cost

    def tokens_at_goal(self, q_state):
        """Difference in pieces already at their goal (location is None)."""
        retval = q_state.locationsW.count(None) - q_state.locationsB.count(None)
        return retval if self.player == WHITE else -retval

    def playerturnUtil(self, q_state):
        """1 when it is WHITE's move, from self.player's point of view.

        For WHITE this yields 1/0; for BLACK it yields -1/0 — numerically
        identical to the original's bool / negated-bool returns.
        """
        myturn = int(q_state.curPlayer == WHITE)
        return myturn if self.player == WHITE else -myturn

    def move_motivate(self, q_state):
        """Small term rewarding pieces for sitting away from the side edges.

        Each surviving piece contributes its distance (along axis 1 of its
        location tuple) from the nearer board edge, peaking near the
        centre line.  Opponent pieces contribute negatively; the sign is
        flipped when this agent plays BLACK.
        """
        halfBrd = q_state.brdSize // 2  # loop-invariant, hoisted out of the loop
        retval = 0
        for i in range(q_state.numPieces):
            locW = q_state.locationsW[i]
            if locW is not None:
                if locW[1] > halfBrd:
                    retval += q_state.brdSize - locW[1]
                else:
                    retval += locW[1]
            locB = q_state.locationsB[i]
            if locB is not None:
                if locB[1] > halfBrd:
                    retval -= q_state.brdSize - locB[1]
                else:
                    retval -= locB[1]
        return retval if self.player == WHITE else -retval

    # distances_dif heuristic is diff of distances of the agents of each
    # player from the goal
    def distances_dif(self, q_state):
        """Signed difference of the two sides' summed row distances.

        whiteDistance sums row indices of surviving WHITE pieces;
        blackDistance sums (brdSize - 1 - row) for surviving BLACK pieces.
        The difference is negated when this agent plays BLACK.
        """
        whiteDistance = sum(posW[0]  # row distance
                            for posW in q_state.locationsW if posW is not None)
        blackDistance = sum(q_state.brdSize - 1 - posB[0]
                            for posB in q_state.locationsB if posB is not None)
        diff = whiteDistance - blackDistance
        return diff if self.player == WHITE else -diff

    # walls_left_dif heuristic is diff of walls left for each player
    def walls_left_dif(self, q_state):
        """Wall-count advantage: WHITE's walls left minus BLACK's,
        negated when this agent plays BLACK."""
        diff = q_state.WallsLeftW - q_state.WallsLeftB
        return diff if self.player == WHITE else -diff