import numpy as np
import torch


class SimpleMCTS:
    """Simplified MCTS used to generate higher-quality training samples.

    Instead of a full tree search, this class evaluates the position once
    with the network, perturbs the legal-action scores with Gaussian
    exploration noise, and returns a softmax distribution over the legal
    moves only.
    """

    def __init__(self, network, c_puct=1.0, n_simulations=50, board_size=15):
        """
        Args:
            network: callable mapping a state tensor of shape
                ``(1, 6, board_size, board_size)`` to per-action scores
                (a torch tensor with ``board_size**2`` entries).
            c_puct: PUCT exploration constant. Kept for API compatibility;
                unused by this simplified single-evaluation search.
            n_simulations: simulation budget. Kept for API compatibility;
                unused by this simplified single-evaluation search.
            board_size: side length of the square board (default 15,
                i.e. 225 actions — matches the original hard-coded size).
        """
        self.network = network
        self.c_puct = c_puct
        self.n_simulations = n_simulations
        self.board_size = board_size
        self.n_actions = board_size * board_size

    def search(self, env_state):
        """Run the simplified search and return an action distribution.

        Args:
            env_state: dict with at least ``'legal_actions'`` (list of flat
                board indices); ``'board'`` and ``'current_player'`` are
                also required by :meth:`_state_to_tensor` when any legal
                action exists.

        Returns:
            ``np.ndarray`` of shape ``(board_size**2,)`` that sums to 1
            over the legal actions, or all zeros when no action is legal.
        """
        legal_actions = env_state['legal_actions']
        if not legal_actions:
            return np.zeros(self.n_actions)

        # Simplified search: one network evaluation plus exploration noise.
        with torch.no_grad():
            # NOTE: a full implementation would use GomokuStateProcessor
            # to build the network input; this is the simplified encoding.
            state_tensor = self._state_to_tensor(env_state)
            q_values = self.network(state_tensor)
            q_values = q_values.cpu().numpy().flatten()

        # Score only the legal actions; illegal ones keep probability 0.
        action_probs = np.zeros(self.n_actions)
        legal_q_values = q_values[legal_actions]

        # Gaussian exploration noise so repeated searches vary the move.
        legal_q_values += np.random.normal(0, 0.1, len(legal_q_values))

        # Numerically stable softmax over the legal-action scores.
        exp_values = np.exp(legal_q_values - np.max(legal_q_values))
        legal_probs = exp_values / np.sum(exp_values)

        action_probs[legal_actions] = legal_probs
        return action_probs

    def _state_to_tensor(self, env_state):
        """Convert an environment state dict into the network input tensor.

        Simplified encoding (a real implementation would use
        GomokuStateProcessor): channel 0 marks the current player's
        stones, channel 1 the opponent's, channels 2-5 (history) stay
        zero.

        Returns:
            float32 torch tensor of shape ``(1, 6, board_size, board_size)``.
        """
        board = env_state['board']
        n = self.board_size
        # batch_size=1, 2 player channels + 4 (zeroed) history channels.
        # Built as float32 so torch.from_numpy needs no dtype conversion.
        state = np.zeros((1, 6, n, n), dtype=np.float32)

        # Current player's stones.
        state[0, 0] = (board == env_state['current_player'])
        # Opponent's stones (assumes players are encoded as +p / -p —
        # TODO confirm against the environment's player encoding).
        state[0, 1] = (board == -env_state['current_player'])
        # History channels 2-5 intentionally left at zero (simplified).

        return torch.from_numpy(state)