import torch
import numpy as np
import random
import math

from rainforeLearn.gomoku.v2.neywork.state_processor import GomokuStateProcessor


class ActionSelector:
    """Action selector — encapsulates the agent's action-selection strategy.

    Supports two modes:
      * MCTS sampling: draw an action from the visit/probability
        distribution returned by an MCTS search.
      * Epsilon-greedy: exploit the policy network's Q-values with
        probability (1 - epsilon), otherwise explore uniformly.

    Tracks ``steps_done`` across calls to drive the epsilon decay schedule.
    """

    def __init__(self, config, device):
        """
        Args:
            config: configuration object; must expose ``use_noisy``,
                ``board_size``, ``history_steps``, ``epsilon_start``,
                ``epsilon_end``, ``epsilon_decay`` and ``epsilon_group_steps``.
            device: torch device that network input tensors are moved to.
        """
        self.config = config
        self.device = device
        self.steps_done = 0
        # Cached state processor, built lazily on first use so it is not
        # re-created on every call to _env_state_to_tensor.
        self._state_processor = None

    def select_action(self, policy_net, env_state, mcts=None, epsilon=None, use_mcts=False):
        """Select an action for the current environment state.

        Args:
            policy_net: network mapping a state tensor to per-action Q-values.
            env_state: dict with at least ``'legal_actions'``; the epsilon-greedy
                exploit path additionally reads ``'board'`` and ``'current_player'``.
            mcts: optional MCTS searcher (used only when ``use_mcts`` is True).
            epsilon: exploration rate; computed from the decay schedule when None.
            use_mcts: when True and ``mcts`` is provided, sample from the MCTS
                distribution instead of epsilon-greedy.

        Returns:
            The chosen action index, or -1 when there is no legal action.
        """
        legal_actions = env_state['legal_actions']
        if not legal_actions:
            # Sentinel for "no move available"; callers treat -1 as terminal.
            return -1

        if use_mcts and mcts:
            return self._select_with_mcts(mcts, env_state, legal_actions)
        return self._select_with_epsilon_greedy(policy_net, env_state, legal_actions, epsilon)

    def _select_with_mcts(self, mcts, env_state, legal_actions):
        """Sample an action from the MCTS search distribution over legal moves."""
        action_probs = mcts.search(env_state)
        legal_probs = np.asarray(action_probs, dtype=np.float64)[legal_actions]
        total = legal_probs.sum()
        if total > 0:
            legal_probs = legal_probs / total
        else:
            # Degenerate search output (zero mass on every legal move):
            # fall back to a uniform distribution instead of dividing by
            # zero, which would make np.random.choice raise on NaN probs.
            legal_probs = np.full(len(legal_actions), 1.0 / len(legal_actions))
        action = np.random.choice(legal_actions, p=legal_probs)
        self.steps_done += 1
        return action

    def _select_with_epsilon_greedy(self, policy_net, env_state, legal_actions, epsilon):
        """Select an action via the epsilon-greedy policy."""
        if epsilon is None:
            epsilon = self.compute_epsilon()

        # NoisyNet exploration: the noise in the network itself provides
        # exploration, so epsilon-greedy randomness is disabled.
        if self.config.use_noisy:
            policy_net.reset_noise()
            epsilon = 0

        if random.random() > epsilon:
            action = self._exploit_action(policy_net, env_state, legal_actions)
        else:
            action = self._explore_action(legal_actions)

        self.steps_done += 1
        return action

    def _exploit_action(self, policy_net, env_state, legal_actions):
        """Greedily pick the legal action with the highest Q-value."""
        with torch.no_grad():
            state_tensor = self._env_state_to_tensor(env_state)
            q_values = policy_net(state_tensor)
            q_values = q_values.cpu().numpy().flatten()

            # Restrict the argmax to legal actions only.
            legal_q_values = q_values[legal_actions]
            best_legal_idx = np.argmax(legal_q_values)
            return legal_actions[best_legal_idx]

    def _explore_action(self, legal_actions):
        """Pick a uniformly random legal action (exploration)."""
        return random.choice(legal_actions)

    def _env_state_to_tensor(self, env_state):
        """Convert an environment state dict into a network input tensor.

        Reads ``env_state['board']`` and ``env_state['current_player']`` and
        runs them through the (cached) GomokuStateProcessor with an empty
        action history, returning a batch-of-one tensor on ``self.device``.
        """
        board = env_state['board']
        current_player = env_state['current_player']

        # Build the processor once and reuse it; the original code
        # constructed a new instance on every call.
        if self._state_processor is None:
            self._state_processor = GomokuStateProcessor(
                board_size=self.config.board_size,
                history_steps=self.config.history_steps
            )

        board_batch = torch.FloatTensor(board).unsqueeze(0)
        action_history_batch = [[]]
        cur_player_batch = torch.tensor([current_player])

        state = self._state_processor.process_state(
            board_batch, action_history_batch, cur_player_batch
        )

        return state.to(self.device)

    def compute_epsilon(self):
        """Return the current epsilon per the exponential decay schedule.

        ``steps_done`` is bucketed into "episodes" of ``epsilon_group_steps``
        steps; epsilon decays exponentially from ``epsilon_start`` toward
        ``epsilon_end`` as episodes advance.
        """
        episode = self.steps_done // self.config.epsilon_group_steps
        return self.config.epsilon_end + \
            (self.config.epsilon_start - self.config.epsilon_end) * \
            math.exp(-1. * episode / self.config.epsilon_decay)