import os
from datetime import datetime
from collections import deque
from typing import Optional, Dict, Any
from tqdm import tqdm

from rainforeLearn.gomoku.v2.agents.dqn_agent import GomokuDQNAgent
from rainforeLearn.gomoku.v2.configs.config import GomokuDQNConfig
from rainforeLearn.gomoku.v2.environment.env import GomokuEnvironment
from rainforeLearn.gomoku.v2.train.output.chart.chart_generator import ChartGenerator
from rainforeLearn.gomoku.v2.train.constants.train_constants import TrainConstants
from rainforeLearn.gomoku.v2.train.reward.reward_system import RewardSystem
from rainforeLearn.gomoku.v2.train.run.game_runner import GameRunner
from rainforeLearn.gomoku.v2.train.checkpoint.checkpoint_manager import CheckpointManager
from rainforeLearn.gomoku.v2.train.checkpoint.training_state_manager import TrainingStateManager
from rainforeLearn.gomoku.v2.train.evaluator.model_evaluator import ModelEvaluator
from rainforeLearn.gomoku.v2.train.statistics.train_stats_printer import TrainingStatsPrinter
from rainforeLearn.gomoku.v2.train.output.log.log_generator import LogGenerator


class SelfPlayTrainer:
    """Self-play trainer with checkpoint-resume support and enhanced statistics.

    Drives the full self-play training lifecycle for a Gomoku DQN agent:
    episode rollout, network updates, periodic batch statistics, model
    evaluation, best-model tracking, auto-checkpointing, analysis charts,
    and training-log output.
    """

    def __init__(self, agent: GomokuDQNAgent, config: GomokuDQNConfig):
        self.agent = agent
        self.config = config
        self.env = GomokuEnvironment(config)

        # Core collaborators.
        self.reward_system = RewardSystem(config)
        self.game_runner = GameRunner(self.env, agent)
        self.checkpoint_manager = CheckpointManager(config)
        self.stats_printer = TrainingStatsPrinter(config, enable_advanced_analysis=config.enable_advanced_analysis)
        self.state_manager = TrainingStateManager(self)

        # Rolling training statistics (bounded deques keep memory constant).
        self.episode_rewards = deque(maxlen=TrainConstants.DEFAULT_BUFFER_SIZE)
        self.episode_lengths = deque(maxlen=TrainConstants.DEFAULT_BUFFER_SIZE)
        self.win_rates = deque(maxlen=TrainConstants.DEFAULT_WIN_RATES_SIZE)

        # Training progress state (restored when resuming from a checkpoint).
        self.current_episode = 0
        self.total_episodes = 0
        self.training_start_time: Optional[datetime] = None
        self.total_training_time = 0.0

        # Best-model tracking.
        self.best_win_rate = 0.0
        self.best_episode = 0

    def train(self, num_episodes: int = 1000, checkpoint_filepath: Optional[str] = None) -> None:
        """Run the training loop, optionally resuming from a checkpoint.

        Args:
            num_episodes: Total number of episodes to train for.
            checkpoint_filepath: Optional checkpoint file to resume from.
        """
        try:
            self._initialize_training(num_episodes, checkpoint_filepath)
            self._run_training_loop()
            self._finalize_training()
        except KeyboardInterrupt:
            print("\n⚠️ 训练被用户中断")
        except Exception as e:
            # Top-level boundary: report the failure with a traceback instead
            # of letting it propagate out of the trainer.
            print(f"\n❌ 训练过程中出现错误: {e}")
            import traceback
            traceback.print_exc()

    def _initialize_training(self, num_episodes: int, checkpoint_filepath: Optional[str]) -> None:
        """Prepare training state; load a checkpoint first when resuming."""
        if checkpoint_filepath:
            self._load_checkpoint(checkpoint_filepath)

        self.total_episodes = num_episodes
        self.training_start_time = datetime.now()
        self.stats_printer.print_training_info(self.total_episodes, self.current_episode, self.agent,
                                               self.training_start_time)

    def _run_training_loop(self) -> None:
        """Iterate the remaining episodes with a progress bar."""
        progress_bar = tqdm(
            range(self.current_episode, self.total_episodes),
            desc="智能训练五子棋AI",
            initial=self.current_episode,
            total=self.total_episodes
        )

        for episode in progress_bar:
            # `episode` from range() is 0-based; current_episode is 1-based.
            self.current_episode = episode + 1
            self._run_single_episode()
            self._update_network_if_ready()
            self._process_periodic_tasks()

    def _run_single_episode(self) -> None:
        """Play one self-play game and record its statistics."""
        use_mcts = self._should_use_mcts()

        episode_reward, episode_length, winner, invalid_moves = self.game_runner.run_single_game(use_mcts)

        self._update_episode_statistics(episode_reward, episode_length, winner, invalid_moves)

    def _should_use_mcts(self) -> bool:
        """Return True when this episode should use MCTS sampling."""
        return (self.config.use_mcts_sampling and
                self.current_episode % self.config.mcts_update_freq == 0)

    def _update_episode_statistics(self, episode_reward: float, episode_length: int,
                                   winner: int, invalid_moves: int) -> None:
        """Record per-episode metrics into the trackers and the reward system."""
        self.episode_rewards.append(episode_reward)
        self.episode_lengths.append(episode_length)
        self.stats_printer.episode_game_tracker.update_game_result(winner, ai_player=TrainConstants.AI_PLAYER)
        self.stats_printer.episode_game_tracker.update_invalid_moves(invalid_moves)
        self.reward_system.add_batch_game_result(winner)

        # Update threat-detection statistics once the game has ended.
        if self.env.game_over:
            # TODO: fetch the real threat information from the environment;
            # an empty placeholder is recorded for now.
            info = {'threats_detected': {}}
            self.stats_printer.threat_tracker.update_threat_stats(info, self.current_episode)

    def _update_network_if_ready(self) -> None:
        """Run a network update once the replay buffer holds a full batch."""
        if len(self.agent.replay_buffer) >= self.config.dqn_batch_size:
            self.agent.update()

    def _process_periodic_tasks(self) -> None:
        """Dispatch all frequency-gated maintenance tasks for this episode."""
        # Batch statistics and log printing.
        if self._should_process_batch_stats():
            self._process_batch_statistics()
            self._print_training_statistics()

        # Evaluation and best-model saving.
        if self._should_evaluate():
            self._evaluate_and_save_best()

        # Automatic checkpoint saving.
        if self._should_auto_save():
            self._save_checkpoint(f"auto_checkpoint_episode_{self.current_episode}")

        # Analysis chart generation.
        if self._should_plot_analysis():
            reward_summary = self.reward_system.get_summary()
            self._plot_comprehensive_analysis(reward_summary)

        # Training-log persistence.
        if self._should_save_log():
            self._save_training_log(self.config.checkpoint_dir)

    def _should_process_batch_stats(self) -> bool:
        """Return True when batch statistics are due this episode.

        Fix: tests ``current_episode`` directly (it is already 1-based),
        matching every other periodic predicate in this class; the previous
        ``current_episode + 1`` check fired one episode early.
        """
        return self.current_episode > 0 and self.current_episode % self.config.training_stats_batch_freq == 0

    def _should_evaluate(self) -> bool:
        """Return True when a model evaluation is due this episode."""
        return self.current_episode > 0 and self.current_episode % self.config.evaluation_freq == 0

    def _should_auto_save(self) -> bool:
        """Return True when an automatic checkpoint save is due."""
        return (self.config.auto_save and self.current_episode > 0 and
                self.current_episode % self.config.checkpoint_freq == 0)

    def _should_plot_analysis(self) -> bool:
        """Return True when an analysis chart should be generated."""
        return (self.config.enable_advanced_analysis and self.current_episode > 0 and
                self.current_episode % self.config.analysis_save_interval == 0)

    def _should_save_log(self) -> bool:
        """Return True when the training log should be saved."""
        return self.current_episode > 0 and self.current_episode % self.config.log_save_freq == 0

    def _process_batch_statistics(self) -> None:
        """Close out the current batch: apply its reward and update trackers."""
        win_rate_bonus, batch_penalty, net_reward = self.reward_system.process_batch_completion(self.current_episode)

        # Retroactively apply the net batch reward to recent experiences.
        if net_reward != 0:
            modified_count = self.agent.apply_batch_reward_by_episodes(
                net_reward,
                self.config.training_stats_batch_freq,
                avg_steps_per_episode=TrainConstants.DEFAULT_AVG_STEPS_PER_EPISODE
            )
            print(f"\n应用批次奖励: {net_reward:.3f}, 修改了 {modified_count} 个经验")

        # Feed the reward/penalty tracker for the stats printer.
        self.stats_printer.reward_penalty_tracker.update_reward_penalty_stats(
            episode_win_rate_bonus=win_rate_bonus,
            episode_batch_penalty=batch_penalty,
            episode=self.current_episode
        )

    def _print_training_statistics(self) -> None:
        """Print network/buffer/reward and win-loss statistics."""
        agent_stats = self.agent.get_training_stats()
        epsilon = self.agent.compute_epsilon()
        reward_summary = self.reward_system.get_summary()

        self.stats_printer.print_net_buffer_reward_stats(self.current_episode, agent_stats, epsilon, reward_summary)
        self.stats_printer.print_win_fail_stats(self.current_episode, epsilon)

    def _evaluate_and_save_best(self) -> None:
        """Evaluate the agent and persist the model when the win rate improves."""
        win_rate = self.evaluate()
        self.win_rates.append(win_rate)

        if win_rate > self.best_win_rate:
            self.best_win_rate = win_rate
            self.best_episode = self.current_episode

            best_model_path = os.path.join(self.config.checkpoint_dir, 'best_model.pth')
            self.agent.save(best_model_path)
            print(f"🏆 新的最佳胜率: {win_rate:.3f} (episode {self.current_episode})")

    def evaluate(self, num_games: Optional[int] = None) -> float:
        """Evaluate the agent's performance.

        Args:
            num_games: Number of evaluation games; defaults to
                ``config.evaluation_games`` when None.

        Returns:
            The measured win rate as reported by :class:`ModelEvaluator`.
        """
        if num_games is None:
            num_games = self.config.evaluation_games

        evaluator = ModelEvaluator(self.env, self.agent, self.config)
        return evaluator.evaluate(num_games)

    def _save_checkpoint(self, checkpoint_name: str) -> str:
        """Save a checkpoint of the current trainer state; return its path."""
        trainer_state = self.state_manager.get_current_state()
        return self.checkpoint_manager.save_checkpoint(trainer_state, self.agent, checkpoint_name)

    def _load_checkpoint(self, checkpoint_filepath: str) -> None:
        """Restore trainer and agent state from a checkpoint file."""
        trainer_state = self.checkpoint_manager.load_checkpoint(checkpoint_filepath, self.agent)
        self.state_manager.restore_state(trainer_state)
        self.stats_printer.print_checkpoint_info(checkpoint_filepath)

    def _plot_comprehensive_analysis(self, reward_summary: Optional[Dict] = None) -> None:
        """Generate the comprehensive analysis chart (no-op when disabled)."""
        if not self.config.enable_advanced_analysis:
            return

        stats_data = self._prepare_chart_data()
        chart_generator = ChartGenerator(stats_data)
        chart_generator.generate_comprehensive_analysis_chart(self.current_episode, reward_summary)

    def _prepare_chart_data(self) -> Dict[str, Any]:
        """Collect tracker histories into the dict consumed by ChartGenerator."""
        return {
            'win_rate_history': self.stats_printer.episode_game_tracker.win_rate_history,
            'win_rate_bonus_history': self.stats_printer.reward_penalty_tracker.win_rate_bonus_history,
            'batch_penalty_history': self.stats_printer.reward_penalty_tracker.batch_penalty_history,
            'threat_defense_stats': self.stats_printer.threat_tracker.to_dict(),
            'loss_history': self.agent.trainer.loss_history,
        }

    def _finalize_training(self) -> None:
        """Wrap up training: stats, final checkpoint, log, and final chart."""
        if self.training_start_time:
            elapsed_time = (datetime.now() - self.training_start_time).total_seconds()
            self.total_training_time += elapsed_time

        print("\n🎉 自对弈训练完成!")

        # Print final statistics via the stats printer.
        self.stats_printer.print_final_stats(self.total_training_time)

        # Save the final checkpoint.
        final_checkpoint = f"final_episode_{self.current_episode}"
        self._save_checkpoint(final_checkpoint)
        print(f"💾 最终检查点已保存: {final_checkpoint}")

        # Save the final training log.
        self._save_training_log(self.config.checkpoint_dir)

        # Generate the final analysis chart.
        if self.config.enable_advanced_analysis:
            reward_summary = self.reward_system.get_summary()
            # BUG FIX: _plot_comprehensive_analysis accepts only reward_summary;
            # the old call also passed current_episode and raised TypeError.
            self._plot_comprehensive_analysis(reward_summary)

    def _save_training_log(self, checkpoint_dir: str = 'checkpoints') -> None:
        """Write the training log for the current episode to checkpoint_dir."""
        log_generator = LogGenerator(
            self.stats_printer.episode_game_tracker,
            self.stats_printer.reward_penalty_tracker,
            self.stats_printer.threat_tracker
        )
        log_generator.save_training_log(self.current_episode, checkpoint_dir)
