import torch
import os
from rainforeLearn.gomoku.v2.configs.config import GomokuDQNConfig
from rainforeLearn.gomoku.v2.train.expert.expert_data_loader import ExpertDataLoader
from rainforeLearn.gomoku.v2.util.mcts import SimpleMCTS
from rainforeLearn.gomoku.v2.util.replay_buffer import MultiStepReplayBuffer
from rainforeLearn.gomoku.v2.agents.network_manager import NetworkManager
from rainforeLearn.gomoku.v2.agents.action_selector import ActionSelector
from rainforeLearn.gomoku.v2.agents.dqn_trainer import DQNTrainer


class GomokuDQNAgent:
    """Gomoku DQN agent — refactored version with clear separation of concerns.

    Thin facade that wires together the network manager, action selector,
    replay buffer, trainer, and the optional MCTS sampler / expert-data loader,
    and exposes a small API (select_action / update / store_transition /
    save / load) to the training loop.
    """

    def __init__(self, config: "GomokuDQNConfig"):
        """Build the agent from a configuration object.

        Args:
            config: hyper-parameter and feature-flag container for the agent.
        """
        self.config = config
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Initialize all sub-components.
        self._initialize_components()

    def _initialize_components(self):
        """Instantiate all sub-components in dependency order."""
        # Network manager: owns policy/target networks, optimizer, scheduler.
        self.network_manager = NetworkManager(self.config, self.device)

        # Action selector: epsilon-greedy (optionally MCTS-backed) choice.
        self.action_selector = ActionSelector(self.config, self.device)

        # Multi-step (n-step) experience replay, optionally prioritized.
        self.replay_buffer = MultiStepReplayBuffer(
            capacity=self.config.replay_buffer_size,
            n_step=self.config.n_step,
            gamma=self.config.multi_step_gamma,
            use_prioritized=self.config.use_prioritized_replay,
            alpha=self.config.priority_alpha
        )

        # Trainer: performs the actual gradient updates.
        self.trainer = DQNTrainer(
            self.config, self.network_manager, self.replay_buffer, self.device
        )

        # Optional MCTS sampler for search-guided action selection.
        self.mcts = self._create_mcts() if self.config.use_mcts_sampling else None

        # Optional expert-data loader for mixed (expert + replay) training.
        self.expert_data_loader = ExpertDataLoader(self.config) if self.config.use_expert_data else None

    def _create_mcts(self):
        """Create the MCTS sampler backed by the current policy network."""
        return SimpleMCTS(
            self.network_manager.policy_net,
            c_puct=self.config.mcts_c_puct,
            n_simulations=self.config.mcts_simulations
        )

    def select_action(self, env_state, epsilon=None, use_mcts=False):
        """Select an action for ``env_state``.

        Delegates to the action selector, passing the policy network and
        (optionally) the MCTS sampler.
        """
        return self.action_selector.select_action(
            self.network_manager.policy_net, env_state, self.mcts, epsilon, use_mcts
        )

    def compute_epsilon(self):
        """Return the current epsilon value of the exploration schedule."""
        return self.action_selector.compute_epsilon()

    def update(self):
        """Run one training update.

        When expert data is enabled, mixes a replay-buffer batch with an
        expert batch according to ``expert_data_ratio``; otherwise (or while
        the replay buffer is still too small) falls back to the plain
        replay-only update.

        Returns:
            Whatever the underlying trainer returns (e.g. loss statistics).
        """
        # Mixed training with expert data.
        if self.config.use_expert_data and self.expert_data_loader:
            # Split the total batch between expert and replay samples.
            expert_batch_size = int(self.config.dqn_batch_size * self.config.expert_data_ratio)
            replay_batch_size = self.config.dqn_batch_size - expert_batch_size

            # Sample from the replay buffer only once it holds enough data.
            if len(self.replay_buffer) >= replay_batch_size:
                replay_batch = self.replay_buffer.sample(replay_batch_size)

                # Sample from the expert data.
                expert_batch = self.expert_data_loader.sample_batch(expert_batch_size)

                # Normalize the expert batch to a tuple so that '+' below
                # concatenates instead of raising TypeError (list + tuple).
                if isinstance(expert_batch, list):
                    expert_batch = tuple(expert_batch)

                # NOTE(review): tuple '+' concatenates the two batch tuples
                # element-after-element; confirm update_with_batch expects
                # this layout rather than a per-field merge of the batches.
                combined_batch = replay_batch + expert_batch

                # Train on the combined batch.
                return self.trainer.update_with_batch(combined_batch, self.action_selector.steps_done)

        # Expert data disabled or unavailable: standard replay-only update.
        return self.trainer.update(self.action_selector.steps_done)

    def store_transition(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.replay_buffer.push(state, action, reward, next_state, done)

    def get_training_stats(self):
        """Return a dict of training statistics from all components."""
        stats = self.trainer.get_statistics(self.config.training_stats_batch_freq)

        # Augment the trainer stats with agent-level information.
        stats.update({
            'learning_rate': self.network_manager.get_current_lr(),
            'steps_done': self.action_selector.steps_done,
            'epsilon': self.action_selector.compute_epsilon(),
            'buffer_size': len(self.replay_buffer)
        })

        return stats

    def apply_batch_reward_to_experiences(self, net_batch_reward, experiences_count=None):
        """Spread a batch-level reward over the most recent experiences.

        Args:
            net_batch_reward: total reward to distribute; 0 is a no-op.
            experiences_count: how many recent experiences to touch
                (defaults to min(1000, buffer length)).

        Returns:
            Number of experiences actually modified, or None when there is
            nothing to do.
        """
        if len(self.replay_buffer) == 0 or net_batch_reward == 0:
            return

        if experiences_count is None:
            experiences_count = min(1000, len(self.replay_buffer))
        else:
            experiences_count = min(experiences_count, len(self.replay_buffer))

        # Guard against an explicit zero/negative count from the caller,
        # which would otherwise raise ZeroDivisionError below.
        if experiences_count <= 0:
            return 0

        reward_per_experience = net_batch_reward / experiences_count

        buffer = self.replay_buffer.buffer
        current_pos = self.replay_buffer.position

        modified_count = 0
        # Walk backwards from the most recently written slot (ring buffer).
        for i in range(experiences_count):
            pos = (current_pos - 1 - i) % self.replay_buffer.capacity

            if buffer[pos] is not None:
                state, action, old_reward, next_state, done = buffer[pos]
                new_reward = old_reward + reward_per_experience
                buffer[pos] = (state, action, new_reward, next_state, done)
                modified_count += 1

        return modified_count

    def apply_batch_reward_by_episodes(self, net_batch_reward, num_episodes, avg_steps_per_episode=50):
        """Apply a batch reward sized by an estimated number of episode steps."""
        experiences_count = num_episodes * avg_steps_per_episode
        return self.apply_batch_reward_to_experiences(net_batch_reward, experiences_count)

    def save(self, path):
        """Save a full training checkpoint to ``path``.

        Creates parent directories as needed. Fix: ``os.path.dirname`` is an
        empty string for a bare filename, and ``os.makedirs("")`` raises
        FileNotFoundError, so directories are only created when the path
        actually contains one.
        """
        dir_name = os.path.dirname(path)
        if dir_name:
            os.makedirs(dir_name, exist_ok=True)

        checkpoint = {
            'policy_net_state_dict': self.network_manager.policy_net.state_dict(),
            'target_net_state_dict': self.network_manager.target_net.state_dict(),
            'optimizer_state_dict': self.network_manager.optimizer.state_dict(),
            'steps_done': self.action_selector.steps_done,
            'target_update_counter': self.network_manager.target_update_counter,
            'config': self.config
        }

        # Scheduler state is optional — only saved when a scheduler exists.
        if self.network_manager.scheduler:
            checkpoint['scheduler_state_dict'] = self.network_manager.scheduler.state_dict()

        torch.save(checkpoint, path)

    def load(self, path):
        """Load a checkpoint produced by :meth:`save` into this agent."""
        # NOTE(review): the checkpoint contains a pickled config object, so
        # torch >= 2.6 (weights_only=True default) may refuse to load it;
        # pass weights_only=False explicitly there if loading fails.
        checkpoint = torch.load(path, map_location=self.device)

        self.network_manager.policy_net.load_state_dict(checkpoint['policy_net_state_dict'])
        self.network_manager.target_net.load_state_dict(checkpoint['target_net_state_dict'])
        self.network_manager.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # .get with defaults keeps old checkpoints (missing keys) loadable.
        self.action_selector.steps_done = checkpoint.get('steps_done', 0)
        self.network_manager.target_update_counter = checkpoint.get('target_update_counter', 0)

        if (self.network_manager.scheduler and
            'scheduler_state_dict' in checkpoint):
            self.network_manager.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])