"""
改进的MCTS - 结合神经网络
使用神经网络指导搜索，大幅减少模拟次数，提升速度
"""

import numpy as np
import math
from mcts_node import MCTSNode


class AlphaMCTS:
    """AlphaGo Zero-style Monte Carlo Tree Search.

    Uses a neural network to evaluate positions and supply move priors
    instead of random rollouts, which allows far fewer simulations per
    move.  Falls back to vanilla MCTS with random rollouts when no
    network is provided.
    """

    def __init__(self, board, neural_net=None, num_simulations=100, c_puct=1.5):
        """
        Args:
            board: Board object; must expose `.board` (2-D numpy array) and
                `.size` (int side length).
            neural_net: model with `.predict(state) -> (policy, value)`, or
                None to degrade to plain MCTS with random rollouts.
            num_simulations: number of search simulations per move.
            c_puct: PUCT exploration constant.
        """
        self.board = board
        self.neural_net = neural_net
        self.num_simulations = num_simulations
        self.c_puct = c_puct
        self.use_neural_net = neural_net is not None

    def get_best_move(self, current_player, temperature=0):
        """Search from the current board position and return a move.

        Args:
            current_player: player to move (1 or -1).
            temperature: 0 = greedy (most-visited child); >0 = sample a move
                with probability proportional to visits ** (1/temperature).

        Returns:
            (row, col) move tuple, or None if there are no legal moves.
        """
        root = MCTSNode(state=self.board.board.copy(), player=current_player)
        self._run_simulations(root)
        return self._select_action(root, temperature)

    def _run_simulations(self, root):
        """Run `self.num_simulations` select/expand/evaluate/backup passes."""
        for _ in range(self.num_simulations):
            node = root
            search_path = [node]

            # 1. Selection: descend to a leaf via the PUCT rule.
            while node.is_fully_expanded() and not self._is_terminal(node):
                node = self._select_child(node)
                search_path.append(node)

            # 2. Expansion: add children unless the position is terminal.
            if not self._is_terminal(node):
                node = self._expand(node)
                search_path.append(node)

            # 3. Evaluation: network value or random rollout.
            value = self._evaluate(node)

            # 4. Backup: propagate the value along the path.
            self._backpropagate(search_path, value)

    def _select_child(self, node):
        """Return the child maximizing the PUCT score Q + U."""
        best_score = -float('inf')
        best_child = None

        for child in node.children:
            # child.value accumulates from the perspective of the player to
            # move AT THE CHILD, i.e. the opponent of node's player (see
            # _evaluate/_backpropagate), so it must be negated to rank moves
            # for node's player.  (Previously +child.value was used, which
            # made selection prefer moves that are good for the opponent.)
            q_value = -child.value / (child.visits + 1e-8)
            # Exploration bonus, weighted by the prior probability.
            u_value = self.c_puct * child.prior * math.sqrt(node.visits) / (1 + child.visits)
            score = q_value + u_value

            if score > best_score:
                best_score = score
                best_child = child

        return best_child

    def _expand(self, node):
        """Create all children of `node`; return one child for evaluation."""
        legal_moves = self._get_legal_moves(node.state)

        if not legal_moves:
            return node

        if self.use_neural_net:
            # Network priors, restricted to legal moves and renormalized.
            policy_probs, _ = self.neural_net.predict(node.state)
            policy_probs = self._mask_invalid_moves(policy_probs, legal_moves)
        else:
            # Uniform priors when no network is available.
            policy_probs = np.ones(len(legal_moves)) / len(legal_moves)

        for prior, move in zip(policy_probs, legal_moves):
            new_state = node.state.copy()
            new_state[move] = node.player

            node.children.append(MCTSNode(
                state=new_state,
                player=-node.player,
                parent=node,
                move=move,
                prior=prior,
            ))

        # NOTE(review): the first child is always handed back for evaluation
        # (and gets the extra visit); evaluating `node` itself, AlphaZero
        # style, may be preferable — confirm against training results.
        return node.children[0] if node.children else node

    def _evaluate(self, node):
        """Return a value in [-1, 1] from the perspective of node's player to move."""
        winner = self._check_winner(node.state)
        if winner is not None:
            return 1.0 if winner == node.player else -1.0

        if self.use_neural_net:
            _, value = self.neural_net.predict(node.state)
            # Assumes the network scores positions from player 1's
            # perspective regardless of who is to move — TODO confirm
            # against the network's training convention.
            return value if node.player == 1 else -value

        # No network: fall back to a random rollout.
        return self._simulate(node)

    def _simulate(self, node):
        """Random-rollout fallback; returns the outcome for node's player."""
        state = node.state.copy()
        current_player = node.player

        for _ in range(50):  # cap the rollout at 50 plies
            legal_moves = self._get_legal_moves(state)
            if not legal_moves:
                return 0.0  # board full with no winner: draw

            # Play a uniformly random legal move.
            move = legal_moves[np.random.randint(len(legal_moves))]
            state[move] = current_player

            winner = self._check_winner(state)
            if winner is not None:
                return 1.0 if winner == node.player else -1.0

            current_player = -current_player

        return 0.0  # rollout cap reached: score as a draw

    def _backpropagate(self, search_path, value):
        """Add `value` to every node on the path, flipping sign each ply."""
        for node in reversed(search_path):
            node.visits += 1
            node.value += value
            value = -value  # switch to the parent's perspective

    def _select_action(self, root, temperature):
        """Pick the final move from the root's visit counts."""
        if not root.children:
            # No legal moves (e.g. full board): nothing to return.
            return None

        visits = np.array([child.visits for child in root.children])
        moves = [child.move for child in root.children]

        if temperature == 0:
            # Greedy: most-visited child.
            return moves[int(np.argmax(visits))]

        # Sample proportionally to visits ** (1/temperature).
        probs = visits ** (1.0 / temperature)
        probs /= probs.sum()
        idx = np.random.choice(len(moves), p=probs)
        return moves[idx]

    def _get_legal_moves(self, state):
        """Return all empty cells of `state` as (row, col) tuples."""
        return [(i, j)
                for i in range(state.shape[0])
                for j in range(state.shape[1])
                if state[i, j] == 0]

    def _mask_invalid_moves(self, policy_probs, legal_moves):
        """Restrict a flat policy vector to the legal moves and renormalize.

        Args:
            policy_probs: flat array of length size*size from the network.
            legal_moves: list of (row, col) tuples.

        Returns:
            Array of priors aligned with `legal_moves`, summing to 1.
        """
        board_size = self.board.size
        masked_probs = np.array(
            [policy_probs[move[0] * board_size + move[1]] for move in legal_moves],
            dtype=float,
        )

        total = masked_probs.sum()
        if total > 0:
            return masked_probs / total
        # The network put no mass on any legal move: fall back to uniform.
        return np.ones(len(legal_moves)) / len(legal_moves)

    def _is_terminal(self, node):
        """True if the position has a winner or no empty cells remain."""
        return (self._check_winner(node.state) is not None
                or len(self._get_legal_moves(node.state)) == 0)

    def _check_winner(self, state):
        """Return the player (1 or -1) with five in a row, or None."""
        board_size = state.shape[0]

        for i in range(board_size):
            for j in range(board_size):
                player = state[i, j]
                if player == 0:
                    continue

                # Horizontal →
                if j <= board_size - 5 and all(state[i, j + k] == player for k in range(5)):
                    return player
                # Vertical ↓
                if i <= board_size - 5 and all(state[i + k, j] == player for k in range(5)):
                    return player
                # Diagonal ↘
                if (i <= board_size - 5 and j <= board_size - 5
                        and all(state[i + k, j + k] == player for k in range(5))):
                    return player
                # Anti-diagonal ↙
                if (i <= board_size - 5 and j >= 4
                        and all(state[i + k, j - k] == player for k in range(5))):
                    return player

        return None

    def get_action_probs(self, temperature=1.0):
        """Run a search and return the visit distribution (for training).

        Args:
            temperature: 0 = one-hot on the most-visited move; >0 = softened
                distribution visits ** (1/temperature).

        Returns:
            List of (move, prob) pairs over the root's children.
        """
        root = MCTSNode(state=self.board.board.copy(),
                        player=self.board.current_player)
        self._run_simulations(root)

        if not root.children:
            return []

        visits = np.array([child.visits for child in root.children])
        moves = [child.move for child in root.children]

        if temperature == 0:
            probs = np.zeros(len(visits))
            probs[np.argmax(visits)] = 1.0
        else:
            probs = visits ** (1.0 / temperature)
            probs /= probs.sum()

        return list(zip(moves, probs))


def main():
    """Smoke-test AlphaMCTS without a neural network (random-rollout mode)."""
    print("🧪 测试AlphaMCTS...")
    from board import Board

    board = Board()
    mcts = AlphaMCTS(board, num_simulations=100)

    print("✅ 不使用神经网络的MCTS")
    move = mcts.get_best_move(current_player=1)
    print(f"推荐落子: {move}")

    print("\n✅ AlphaMCTS测试完成！")


if __name__ == "__main__":
    main()
