"""
Go Players
"""

from random import Random
from pygo1963.goLogic.GoRules import GoRules
from pygo1963.model.Constants import WHITE_COLOR, BLACK_COLOR
from pygo1963.goLogic.GoEvaluator import AdvacedGoEvaluator
from pygo1963.model.DepthManager import DepthManager
from pygo1963.model.Vertex import Vertex
from pygo1963.model.Move import Move
import time
from pygo1963.model.OpeningKnowledge import OpeningKnowledge


DEBUG = False

class BasePlayer(object):
    """
    Common behaviour shared by every Go player, human or engine.
    """

    def __init__(self, color):
        # BLACK_COLOR or WHITE_COLOR; decides which side this player plays.
        self.color = color

    def __str__(self):
        return 'Black player' if self.color == BLACK_COLOR else 'White player'

    def notify_invalid_move(self):
        """
        Hook invoked when the player's last attempted move was rejected.
        Subclasses may override it; the default implementation does nothing.
        """
        pass
    

class ComputerPlayer(BasePlayer):
    """
    Player with all the computer engine initialization.

    Wires together the components every engine player needs: the
    opening book, the search-depth policy, the position evaluator and
    the rules engine used to generate valid moves.
    """

    def __init__(self, color, depth_manager=None, evaluator=None):
        """
        color         -- BLACK_COLOR or WHITE_COLOR
        depth_manager -- search depth policy; a fresh DepthManager when None
        evaluator     -- position evaluator; a fresh AdvacedGoEvaluator when None
        """
        super(ComputerPlayer, self).__init__(color)

        # Bug fix: the default used to be `evaluator=AdvacedGoEvaluator()`,
        # which is evaluated once at class-definition time, so every
        # ComputerPlayer silently shared the same evaluator instance.
        # Use None sentinels and build fresh instances per player instead.
        if depth_manager is None:
            depth_manager = DepthManager()
        if evaluator is None:
            evaluator = AdvacedGoEvaluator()

        self.opening_knowledge = OpeningKnowledge(color)
        self.depth_manager = depth_manager
        self.evaluator = evaluator
        self.go_rules = GoRules()
    

class MinimaxPlayer(ComputerPlayer):
    """
    Engine player that uses the minimax algorithm to deduce the best move. 
    """
    
    def make_move(self, board):
        """
        Ask engine player for his move.

        Scores every valid move with a fixed-depth, unpruned minimax
        search and returns the best one for this player's color
        (white maximizes the evaluation, black minimizes it).
        """
        self.number_of_moves = 0
        outcome = []

        # `white_turn` describes whose turn it is in the child positions,
        # i.e. it is the OPPONENT's side after this player's move.
        if self.color == WHITE_COLOR:
            white_turn = False
        else:
            white_turn = True
            
        # Collect (move, score) pairs for every legal reply.
        for move in self.go_rules.generate_valid_moves(board, self.color):
            outcome.append((move, self.evaluate_move(board, move, self.depth_manager.depth - 1, white_turn)))

        # White prefers high scores, black low ones.
        if self.color == WHITE_COLOR:
            best_move = max(outcome, key=lambda x: x[1])
        else:
            best_move = min(outcome, key=lambda x: x[1])
            
        if DEBUG:
            print "Best moves: ", outcome
            print "Number of moves calculated: ", self.number_of_moves 
            
        return best_move[0]

    def evaluate_move(self, board, move, depth, white_turn):
        '''
        Evaluates a move using minimax.

        The move is played on `board`, scored recursively, and then
        undone, so the board is left unchanged when this returns.

        board      -- current position (mutated and restored in place)
        move       -- the move to evaluate
        depth      -- remaining search depth; 0 triggers a leaf evaluation
        white_turn -- True if white is to move in the position after `move`
        '''
        self.number_of_moves += 1
        board.make_move(move)
        
        # Leaf node: static evaluation of the resulting position.
        if depth == 0 or board.game_finished:
            score = self.evaluator.evaluate(board)
            board.undo_move()
            return score

        outcome = []
              
        color = WHITE_COLOR if white_turn else BLACK_COLOR
         
        for child_move in self.go_rules.generate_valid_moves(board, color):
            outcome.append(self.evaluate_move(board, child_move, depth - 1, not white_turn))

        board.undo_move()

        # White is the maximizing side, black the minimizing side.
        if white_turn:
            return max(outcome)
        else:
            return min(outcome)

class RandomPlayer(ComputerPlayer):
    '''
    Engine player that moves randomly.
    '''

    def make_move(self, board):
        '''
        Ask engine player for his move.

        Picks one of the currently valid moves uniformly at random.
        '''
        valid_moves = self.go_rules.generate_valid_moves(board, self.color)
        # Fix: the original invoked the UNBOUND method
        # `Random.randint(random, 0, ...)`, smuggling the instance in as
        # the first positional argument.  Call it on the instance instead;
        # the drawn index (and thus behavior) is identical.
        rng = Random()
        random_move_index = rng.randint(0, len(valid_moves) - 1)

        return valid_moves[random_move_index]

class AlphaBetaPlayer(ComputerPlayer):
    '''
    Engine player that uses alpha-beta algorithm
    (minimax with alpha-beta pruning, an opening book, dynamic depth
    selection and shallow move ordering near the root).
    '''
    
    def make_move(self, board):
        '''
        Ask engine player for his move.

        Tries the opening book first; otherwise runs an alpha-beta
        search over every valid move and returns the best one.  Passes
        when even the best score signals a decided game (scores beyond
        +/-1000 -- see the win/loss adjustment in evaluate_move).
        '''
        outcome = []
        self.number_of_moves = 0
        # Cutoff statistics: cut1 counts maximizing-side cutoffs,
        # cut2 minimizing-side cutoffs (reported when DEBUG is on).
        self.cut1 = 0
        self.cut2 = 0
        
        # Book move available?  Play it without searching.
        move = self.opening_knowledge.get_opening_move(board)
        
        if move:
            return move
        
        # Let the depth manager adapt search depth to the game stage.
        self.depth_manager.update_depth(board)
        
        if self.color == WHITE_COLOR:
            white_turn = True
        else:
            white_turn = False
        
        # Initial window: +/-2000 acts as -infinity / +infinity.
        alpha = -2000
        beta = 2000
        valid_moves = self.go_rules.generate_valid_moves(board, self.color)
        
        for move in self.order_moves(board, valid_moves, white_turn):
            value = self.evaluate_move(board, move, alpha, beta, self.depth_manager.depth - 1, not white_turn)
            # NOTE(review): the root window bound is OVERWRITTEN rather
            # than tightened with max/min, so a weak later move can widen
            # the window again -- confirm this is intentional.
            if self.color == WHITE_COLOR:
                alpha = value
            else:
                beta = value
            
            outcome.append((move, value))

        if self.color == WHITE_COLOR:
            best_move = max(outcome, key=lambda x: x[1])
            
            # Hopeless position for white: pass instead of playing on.
            if best_move[1] <= -1000:
                return Move(WHITE_COLOR, Vertex.PASS())
        else:
            best_move = min(outcome, key=lambda x: x[1])
            
            # Hopeless position for black: pass instead of playing on.
            if best_move[1] >= 1000:
                return Move(BLACK_COLOR, Vertex.PASS())

        if DEBUG:
            print "Best moves: ", outcome
            print "Number of moves calculated: ", self.number_of_moves 
            print "Cut1: ", self.cut1
            print "Cut2: ", self.cut2

        return best_move[0]

    def order_moves(self, board, move_list, white_turn):
        """
        Order the moves according to their evaluation.

        Sorts move_list IN PLACE, best move first for the side to move,
        using a one-ply static evaluation; a good ordering increases
        the number of alpha-beta cutoffs.  Returns the same list.
        """
        
        def evaluate(board, move):
            # Score the position after `move`, restoring the board.
            board.make_move(move)
            score = self.evaluator.evaluate(board)
            board.undo_move()
            
            return score
       
        # White wants high scores first, black low scores first.
        if white_turn:        
            move_list.sort(key=lambda x: evaluate(board, x), reverse=True)
        else:
            move_list.sort(key=lambda x: evaluate(board, x), reverse=False)
        
        return move_list

    def evaluate_move(self, board, move, alpha, beta, depth, white_turn):
        '''
        Evaluates the move with the alpha-beta algorithm.

        Plays `move` on `board`, searches the resulting position to
        `depth` plies within the (alpha, beta) window, then undoes the
        move so the board is unchanged on return.

        white_turn -- True if white is to move after `move` is played
        Returns the (possibly cut-off) minimax value of the move.
        '''
        
        self.number_of_moves += 1
        board.make_move(move)
        
        if depth == 0 or board.game_finished:
            score = self.evaluator.evaluate(board)
            
            # Bias decided games by the remaining depth so the engine
            # prefers quicker wins and drags out losses.
            if board.game_finished:
                if board.winner == WHITE_COLOR:
                    score += depth
                else:
                    score -= depth
            
            board.undo_move()
            
            return score

        color = WHITE_COLOR if white_turn else BLACK_COLOR
        
        moves = self.go_rules.generate_valid_moves(board, color)
        
        # Only order moves within the first two plies from the root,
        # where the sorting cost pays off; skip at depth 1 where the
        # children are leaves anyway.
        if self.depth_manager.depth - depth <= 2 and depth != 1:
            moves = self.order_moves(board, moves, white_turn)      

        if white_turn:
            # Maximizing side: raise alpha, cut when the window closes.
            for child_move in moves:
                alpha = max (alpha, self.evaluate_move(board, child_move, alpha, beta, depth - 1, not white_turn))
                if alpha >= beta:
                    self.cut1 += 1
                    break
            board.undo_move()
            return alpha
        else:
            # Minimizing side: lower beta, cut when the window closes.
            for child_move in moves:
                beta = min(beta, self.evaluate_move(board, child_move, alpha, beta, depth -1, not white_turn))
                if alpha >= beta:
                    self.cut2 += 1
                    break
            board.undo_move()
            return beta

class HumanPlayer(BasePlayer):
    """
    Player driven by GUI input delivered through a controller object.
    """

    def __init__(self, color, controller):
        super(HumanPlayer, self).__init__(color)
        # The controller publishes board coordinates from the UI thread.
        self.controller = controller

    def make_move(self, board):
        """
        Block until the controller reports a position, then wrap it in
        a Move.  Returns None if the controller stops listening first.
        """
        self.controller.is_listening = True

        # Poll until the UI thread publishes a position.
        while not self.controller.position:
            if not self.controller.is_listening:
                return
            time.sleep(0.001)

        x, y = self.controller.position
        self.controller.position = None

        # (-1, -1) is the UI's sentinel for a pass move.
        vertex = Vertex.PASS() if (x, y) == (-1, -1) else Vertex(x, y)

        return Move(self.color, vertex)
