from config import *
#from othello import c
import random, time, sys

infinity = 1000000000000000000000000000000000000000

class Player(object):
    """Abstract base for all players; stores which colour the player controls."""

    # Human-readable names for the two piece values.
    color_dict = {1: 'White', -1: 'Black'}

    def __init__(self, color):
        self.color = color

    def get_move(self, game):
        """Return the chosen move as an (x, y) tuple; subclasses must override."""
        raise NotImplementedError

'''class HumanPlayer(Player):
    'Player that moves based on input'
    def get_move(self,board):
        print c
        c.text(10,10,'Hi')
        time.sleep(10)
        return (0,0)'''

class RandomPlayer(Player):
    """Player that picks uniformly at random among its legal moves."""

    def get_move(self, board):
        candidates = board.legal_moves(self.color)
        return random.choice(candidates)

class HumanPlayer(Player):
    # Placeholder for an interactive player; move input is not implemented yet,
    # so instances inherit Player.get_move and raise NotImplementedError.
    pass

'Functions computer players can use to determine best move'
def terminal_value(board, player):
    """Exact value of a finished game from *player*'s point of view.

    Returns +100000 for a win (more pieces than the opponent),
    -100000 for a loss, and 0 for a draw.
    """
    # Compare the two piece counts. A raw per-player count is never negative
    # (see count_difference, which subtracts the two counts), so the original
    # `piece_count(player) < 0` branch was unreachable and `> 0` reported a
    # "win" for any side that still had pieces on the board.
    diff = board.piece_count(player) - board.piece_count(player * -1)
    if diff > 0:
        return 100000
    elif diff < 0:
        return -100000
    else:
        return 0

def count_difference(board, player):
    """Piece-count lead of *player*; exact terminal value once the game ends."""
    if board.game_over():
        return terminal_value(board, player)
    opponent = player * -1
    return board.piece_count(player) - board.piece_count(opponent)

def weighted_squares(board, player):
    """Positional score for *player*: weighted sum of the occupied squares.

    Each square's occupant (1, -1, or 0) is multiplied by its weight from
    config; the signed total is flipped to *player*'s point of view.
    """
    from config import weights
    if board.game_over():
        return terminal_value(board, player)
    total = sum(square * weight for square, weight in zip(board.flatten(), weights))
    return total * player

def inverse_strategy(board, player):
    """Negated count_difference: favours positions where *player* holds fewer pieces."""
    return -count_difference(board, player)

class ComputerPlayer(Player):
    """Base for computer players: remembers the evaluation function used to rank positions."""

    def __init__(self, color, eval_fn):
        Player.__init__(self, color)
        self.eval_fn = eval_fn


class Maximizer(ComputerPlayer):
    """Greedy one-ply player: plays the move whose resulting position scores
    highest under eval_fn (ties broken by move order)."""

    def get_move(self, board):
        legal = board.legal_moves(self.color)
        scores = []
        for candidate in legal:
            trial = board.copy()
            trial.put(candidate)
            trial.make_flips(candidate)
            scores.append(self.eval_fn(trial, self.color))
        # First move achieving the maximum score, matching move order.
        return legal[scores.index(max(scores))]

class Minimax(ComputerPlayer):
    """Computer player that picks the move maximizing eval_fn after a
    fixed-depth negamax search of `ply` half-moves."""

    def __init__(self, color, eval_fn, ply):
        # ply: lookahead depth in half-moves; ply == 1 behaves like Maximizer.
        self.ply = ply
        ComputerPlayer.__init__(self, color, eval_fn)

    def get_move(self, board):
        """Return the legal move with the best negamax value, or None when the
        current position has no legal moves (previously crashed on best[1])."""
        best = None
        for move in board.legal_moves():
            b = board.copy()
            b.make_move(move)
            # minimax_value scores b for the side to move there (the
            # opponent), so negate it to get our value for this move.
            val = -1 * self.minimax_value(b, self.color, self.ply - 1, self.eval_fn)
            if best is None or val > best[0]:
                best = (val, move)
        return best[1] if best is not None else None

    def minimax_value(self, board, color, maxply, eval_fn):
        """Negamax value of `board` from the point of view of the side to move."""
        # Depth limit or finished game: static evaluation for the player to
        # move. The original passed the constant 1 (always White), which broke
        # the negamax sign convention; AlphaBeta below already uses
        # board.player.
        if maxply == 0 or board.game_over():
            return eval_fn(board, board.player)

        best = None
        for move in board.legal_moves():
            b = board.copy()
            b.make_move(move)
            val = -1 * self.minimax_value(b, color, maxply - 1, eval_fn)
            if best is None or val > best:
                best = val

        # No legal replies: score the position statically instead of returning
        # None, which made callers evaluate `-1 * None` (TypeError).
        if best is None:
            return eval_fn(board, board.player)
        return best
def neg(val):
    """Negate `val`, passing None through unchanged (None stands for +/- infinity)."""
    return None if val is None else -val

class AlphaBeta(Minimax):
    """Minimax player with alpha-beta pruning.

    The board object must support:
        copy()        -- deep copy of the position
        legal_moves() -- legal moves for the side to move
        make_move(m)  -- apply a move (also switches the side to move)
        game_over()   -- True when the game has ended
        player        -- the side to move (1 or -1)
    """

    def get_move(self, board):
        """Return the best move found by an alpha-beta search of self.ply
        half-moves, or None if the position has no legal moves."""
        best_val, best_move = None, None
        for move in board.legal_moves():
            b = board.copy()
            b.make_move(move)
            # alphabeta_value scores b for the opponent (the side to move
            # there), so negate the result; the negation of our best-so-far
            # becomes the opponent's beta (upper) bound. neg() maps None
            # (no bound yet) to None.
            # NOTE: depth is self.ply - 1 so total lookahead matches
            # Minimax.get_move -- the root move itself consumes one ply
            # (the original passed self.ply, searching one ply deeper).
            val = neg(self.alphabeta_value(b, self.ply - 1, None,
                                           neg(best_val), self.eval_fn))
            if best_val is None or val > best_val:
                best_val, best_move = val, move
        return best_move

    def alphabeta_value(self, board, maxply, alpha, beta, eval_fn=None):
        """Negamax value of `board` for the side to move, searching maxply plies.

        alpha/beta are the usual fail-hard pruning bounds; None stands for
        -infinity / +infinity respectively.
        """
        # Depth limit or finished game: static evaluation for the side to move.
        if maxply == 0 or board.game_over():
            return eval_fn(board, board.player)

        moves = board.legal_moves()
        # No legal moves: fall back to a static evaluation rather than
        # returning the raw bound / None (the original could leak None into
        # `-1 * None` at the call site).
        if not moves:
            return eval_fn(board, board.player)

        for move in moves:
            b = board.copy()
            b.make_move(move)
            # The child is scored from the opponent's perspective, so the
            # window is negated and swapped:
            #     alpha <= my score <= beta  =>  -beta <= opp score <= -alpha
            val = neg(self.alphabeta_value(b, maxply - 1,
                                           neg(beta), neg(alpha), eval_fn))
            # Fail-hard cutoff: the opponent will never allow a line worth
            # more than beta, so stop searching this node.
            if beta is not None and val >= beta:
                return beta
            # Raise our lower bound.
            if alpha is None or val > alpha:
                alpha = val

        # alpha is the best score achievable from this position.
        return alpha
                    
