import torch
import numpy as np
from gomoku_net import GomokuNet
import gc  # 新增垃圾回收

# Per-worker-process globals, populated by init_worker().
DEVICE = None        # torch.device this worker runs inference on
worker_model = None  # GomokuNet instance owned by this worker process

def init_worker(model_state, use_cuda):
    """Process-pool initializer: build this worker's model on its device.

    :param model_state: state_dict to load into a fresh GomokuNet
    :param use_cuda: whether inference should run on CUDA
    """
    global worker_model, DEVICE
    DEVICE = torch.device("cuda") if use_cuda else torch.device("cpu")
    model = GomokuNet()
    model.load_state_dict(model_state)
    model.eval()
    if use_cuda:
        # Enter the device context explicitly so each worker uses its own
        # CUDA context instead of clashing over the default one.
        with torch.cuda.device(DEVICE):
            model.cuda()
    worker_model = model
class MCTSNode:
    """A single node of the Monte-Carlo search tree."""

    def __init__(self, parent, state, action, current_player=None):
        # Tree structure.
        self.parent = parent        # parent node, None for the root
        self.children = []          # expanded child nodes
        # Game position.
        self.state = state          # board array this node represents
        self.action = action        # move that led here from the parent
        self.current_player = current_player  # side to move at this node
        # Search statistics.
        self.visit_count = 0        # times this node has been visited
        self.value_sum = 0          # accumulated backed-up value
        self.policy = 0.0           # prior-probability placeholder


class MCTS:
    """Monte-Carlo tree search whose leaf evaluation is an alpha-beta search.

    Selection/expansion follow standard MCTS with a UCB1 rule; instead of
    random rollouts, leaves are scored by a shallow alpha-beta minimax.
    """

    def __init__(self, model, num_simulations=300, alpha_beta_depth=2):
        """
        :param model: network producing (policy_logits, value) for a state tensor
        :param num_simulations: simulations performed per call to search()
        :param alpha_beta_depth: depth of the alpha-beta leaf evaluation
        """
        self.model = model
        self.num_simulations = num_simulations
        self.alpha_beta_depth = alpha_beta_depth
        self.root = None  # set by search()

    def search(self, state, current_player):
        """Run the configured number of simulations and return the best action.

        :raises ValueError: if the root position offers no legal move.
        """
        self.root = MCTSNode(parent=None, state=state, action=None,
                             current_player=current_player)
        for _ in range(self.num_simulations):
            node = self.select(self.root)
            reward = self.simulate(node.state, node.current_player)
            self.backpropagate(node, reward)
        best = self.get_best_child(self.root)
        if best is None:
            # Terminal or full root board: there is no move to recommend.
            raise ValueError("MCTS.search: no legal action from root state")
        return best.action

    def select(self, node):
        """Walk down the tree via UCB until a leaf, expanding the frontier node."""
        while not self.is_terminal(node.state):
            if not node.children:
                child = self.expand(node)
                # expand() yields None when there is no legal move; the node
                # itself is then the leaf to evaluate.
                return child if child is not None else node
            node = self.get_best_child(node, exploration_weight=1.0)
        return node

    def expand(self, node):
        """Create one child per legal move; return the highest-prior child.

        :return: the child the policy head considers most promising, or None
                 when there are no legal moves.
        """
        valid_actions = self.get_valid_actions(node.state)
        # BUG FIX: `if not valid_actions:` raised "truth value of an array ...
        # is ambiguous" whenever more than one legal move existed, because
        # get_valid_actions returns a numpy array. Test emptiness explicitly.
        if valid_actions.size == 0:
            return None

        # Query the policy head for move priors.
        # NOTE(review): float16 autocast with device_type="cpu" may be
        # unsupported on older torch versions — confirm the torch build.
        with torch.autocast(device_type=DEVICE.type, dtype=torch.float16):
            state_tensor, _ = state_to_tensor(node.state, node.current_player, augment=False)
            policy, _ = self.model(state_tensor.unsqueeze(0).to(DEVICE))
            policy_probs = torch.softmax(policy, dim=1).cpu().numpy().flatten()

        # Restrict priors to legal moves; renormalize (uniform if all-zero).
        valid_probs = policy_probs[valid_actions]
        total = valid_probs.sum()
        if total != 0:
            valid_probs = valid_probs / total
        else:
            valid_probs = np.ones_like(valid_probs) / len(valid_probs)

        node.children = [
            MCTSNode(
                parent=node,
                state=self.apply_action(node.state.copy(), action, node.current_player),
                action=action,
                current_player=3 - node.current_player,  # side to move alternates
            )
            for action in valid_actions
        ]

        # Descend into the child with the highest prior probability.
        return node.children[np.argmax(valid_probs)]

    def simulate(self, state, current_player):
        """Evaluate a leaf with alpha-beta search from current_player's view."""
        return self.alpha_beta_search(state, self.alpha_beta_depth,
                                      -np.inf, np.inf, current_player, current_player)

    def alpha_beta_search(self, state, depth, alpha, beta, maximizing_player, current_player):
        """Depth-limited minimax with alpha-beta pruning.

        :return: +1 if maximizing_player has won at the horizon, -1 if the
                 opponent has, 0 otherwise (draw or undecided).
        """
        if depth == 0 or self.is_terminal(state):
            winner = self.determine_winner(state)
            return 1 if winner == maximizing_player else -1 if winner != 0 else 0

        if current_player == maximizing_player:  # maximizing side to move
            value = -np.inf
            for action in self.get_valid_actions(state):
                new_state = self.apply_action(state.copy(), action, current_player)
                new_value = self.alpha_beta_search(new_state, depth - 1, alpha, beta,
                                                   maximizing_player, 3 - current_player)
                value = max(value, new_value)
                alpha = max(alpha, value)
                if alpha >= beta:  # beta cutoff
                    break
            return value
        else:  # minimizing side to move
            value = np.inf
            for action in self.get_valid_actions(state):
                new_state = self.apply_action(state.copy(), action, current_player)
                new_value = self.alpha_beta_search(new_state, depth - 1, alpha, beta,
                                                   maximizing_player, 3 - current_player)
                value = min(value, new_value)
                beta = min(beta, value)
                if alpha >= beta:  # alpha cutoff
                    break
            return value

    def backpropagate(self, node, value):
        """Propagate a simulation result from a leaf up to the root.

        NOTE(review): the value is added with the same sign along the whole
        path; two-player MCTS usually negates it per ply — confirm intended.
        """
        while node is not None:
            node.visit_count += 1
            node.value_sum += value
            node = node.parent

    def get_best_child(self, node, exploration_weight=1.0):
        """Pick the child maximizing the UCB1 score (None if no children)."""
        def ucb_score(n):
            if n.visit_count == 0:
                return float('inf')  # always try unvisited children first
            return n.value_sum / n.visit_count + exploration_weight * np.sqrt(
                np.log(node.visit_count) / n.visit_count
            )
        return max(node.children, key=ucb_score, default=None)

    def get_valid_actions(self, state):
        """Return the flat indices of all empty cells."""
        return np.where(state.reshape(-1) == 0)[0]

    def apply_action(self, state, action, player):
        """Place `player`'s stone at flat index `action`; return the 15x15 board.

        The array passed in is modified in place (callers pass copies).
        """
        state = state.reshape(15, 15)
        y, x = divmod(action, 15)
        state[y, x] = player
        return state

    def is_terminal(self, state):
        """True if either player has five in a row or the board is full."""
        state = state.reshape(15, 15)
        for y in range(15):
            for x in range(15):
                if state[y, x] != 0 and check_win(state, y, x):
                    return True
        return np.all(state != 0)

    def determine_winner(self, state):
        """Return the winning player (1 or 2), or 0 if nobody has won."""
        state = state.reshape(15, 15)
        for y in range(15):
            for x in range(15):
                if state[y, x] != 0 and check_win(state, y, x):
                    return state[y, x]
        return 0

    def reset(self):
        """Release the whole tree so the worker process can reclaim memory."""
        if self.root:
            self.clear_node(self.root)
        self.root = None

    def clear_node(self, node):
        """Recursively detach a subtree, breaking parent/child references."""
        if node:
            node.parent = None  # break the upward reference first
            for child in list(node.children):  # copy: list is cleared below
                self.clear_node(child)
            node.children.clear()

def play_game_worker(args):
    """Self-play worker for a process pool.

    Plays one full game of Gomoku with MCTS on this worker's global model
    and returns training arrays, or None if the game fails.

    :param args: (game_idx, start_player) with start_player in {1, 2}
    :return: (states [T,15,15], policies [T,225], values [T]) or None
    """
    game_idx, start_player = args
    # Bind everything the `finally` block cleans up before the try, so the
    # cleanup can never raise NameError if setup itself fails.
    mcts = None
    state = None
    game_states = []
    try:
        state = np.zeros((15, 15), dtype=np.float32)
        current_player = start_player
        mcts = MCTS(worker_model, num_simulations=200)
        max_steps = 15 * 15
        winner = 0  # 0 = draw, 1/2 = winning player

        for _ in range(max_steps):
            # Board full: the game is a draw.
            if np.all(state.reshape(15, 15) != 0):
                winner = 0
                break

            action = mcts.search(state.reshape(15, 15), current_player)
            y, x = divmod(action, 15)

            # Build a fixed-size 225-entry visit-count policy indexed by
            # action. BUG FIX: previously the policy had one entry per legal
            # move, so its length shrank every turn and the np.stack() over
            # all steps below always failed on games longer than one move.
            policy = np.zeros(15 * 15, dtype=np.float32)
            for child in mcts.root.children:
                policy[child.action] = child.visit_count
            policy_sum = policy.sum()
            if policy_sum > 0:
                policy /= policy_sum
            else:
                # Fallback: uniform over the currently legal moves.
                legal = np.where(state.reshape(-1) == 0)[0]
                policy[legal] = 1.0 / len(legal)

            game_states.append({
                'state': state.copy(),
                'policy': policy,
                'current_player': current_player
            })

            # Apply the move (keep the working copy flattened afterwards,
            # matching how earlier steps stored/consumed the state).
            state = state.reshape(15, 15)
            state[y, x] = current_player
            state = state.flatten()

            if check_win(state.reshape(15, 15), y, x):
                winner = current_player
                break

            current_player = 3 - current_player

        # Assign the final outcome to every recorded position, from the
        # perspective of the player who moved at that position.
        for step in game_states:
            if winner == 0:
                step['value'] = 0.0
            else:
                step['value'] = 1.0 if winner == step['current_player'] else -1.0

        states = np.stack([s['state'].reshape(15, 15) for s in game_states])
        policies = np.stack([s['policy'] for s in game_states])
        values = np.array([s['value'] for s in game_states], dtype=np.float32)
        return (states, policies, values)

    except Exception as e:
        print(f"Worker {game_idx} failed: {str(e)}")
        return None

    finally:
        # Explicit cleanup so long-lived pool workers do not accumulate memory.
        if mcts is not None:
            mcts.reset()
            del mcts
        del state, game_states
        torch.cuda.empty_cache()  # no-op when CUDA was never initialized
        gc.collect()

def state_to_tensor(state, current_player=1, augment=True):
    """Convert a board array to a 2-channel float tensor plus transform info.

    Channel 0 marks the current player's stones, channel 1 the opponent's.
    With augment=True a random rotation (and possibly a horizontal flip) may
    be applied; the applied transform is returned as (rotations, flipped) so
    callers can remap action indices accordingly.
    """
    rotations, flipped = 0, False
    if augment and np.random.rand() > 0.5:
        rotations = np.random.randint(4)
        state = np.rot90(state, k=rotations).copy()
        if np.random.rand() > 0.5:
            state = np.fliplr(state).copy()
            flipped = True

    own = np.asarray(state == current_player, dtype=np.float32)
    other = np.asarray(state == (3 - current_player), dtype=np.float32)
    planes = np.stack([own, other])
    return torch.from_numpy(planes).float(), (rotations, flipped)

def adjust_policy(policy, action, transform_params):
    """Remap a flat action index through the board transform described by
    transform_params (rotation count, horizontal flip).

    NOTE: `policy` is accepted for interface compatibility but is not used.
    NOTE(review): verify the rotation mapping direction matches np.rot90 as
    applied in state_to_tensor — it cannot be confirmed from this function
    alone.
    """
    rotations, flipped = transform_params
    row, col = divmod(action, 15)

    # Apply one 90-degree coordinate rotation per recorded rotation.
    for _ in range(rotations):
        row, col = col, 14 - row
    if flipped:
        col = 14 - col

    if not (0 <= col < 15 and 0 <= row < 15):
        raise ValueError(f"无效坐标转换：{action} -> ({row}, {col})")
    return row * 15 + col

def check_win(board, y, x):
    """Return True if the stone at (y, x) lies on a line of 5+ equal values.

    Checks horizontal, vertical and both diagonals through (y, x). Note the
    cell's own value is used as the player — callers are expected to only
    call this on occupied cells.
    """
    board = board.reshape(15, 15)
    player = board[y, x].item()  # scalar, so == below compares values

    def run_length(dy, dx):
        # Consecutive same-player stones from (y, x) exclusive along (dy, dx).
        length = 0
        ny, nx = y + dy, x + dx
        while 0 <= ny < 15 and 0 <= nx < 15 and board[ny, nx].item() == player:
            length += 1
            ny += dy
            nx += dx
        return length

    # For each axis, count forward plus backward plus the stone itself.
    for dy, dx in ((0, 1), (1, 0), (1, 1), (1, -1)):
        if 1 + run_length(dy, dx) + run_length(-dy, -dx) >= 5:
            return True
    return False