import os
from typing import Dict, Optional

from rainforeLearn.gomoku.v2.train.statistics.tracker.threat_defense_tracker import ThreatDefenseTracker
from rainforeLearn.gomoku.v2.train.statistics.tracker.episode_game_result_tracker import EpisodeGameResultTracker
from rainforeLearn.gomoku.v2.train.statistics.tracker.reward_penalty_tracker import RewardPenaltyTracker


class TrainingStatsPrinter:
    """Gomoku training statistics printer - unified output of all training stats.

    Wraps three trackers (episode game results, reward/penalty bookkeeping and
    threat-defense counters) and formats their current state to stdout at the
    various stages of the training loop (per-episode, per-batch, final summary,
    checkpoint restore).
    """

    def __init__(self, config, enable_advanced_analysis: bool = True):
        """Create the printer.

        Args:
            config: training configuration object (reads board_size,
                training_stats_batch_freq, network_config, use_mcts_sampling,
                use_prioritized_replay, use_expert_data, expert_data_ratio).
            enable_advanced_analysis: stored flag; not consulted by the
                printing methods visible here.
        """
        self.config = config
        self.enable_advanced_analysis = enable_advanced_analysis

        # Core tracker components.
        self.episode_game_tracker = EpisodeGameResultTracker()
        self.reward_penalty_tracker = RewardPenaltyTracker()
        self.threat_tracker = ThreatDefenseTracker()

    # ------------------------------------------------------------------
    # Per-episode detailed statistics
    # ------------------------------------------------------------------
    def print_net_buffer_reward_stats(self, episode: int, agent_stats: Dict,
                                      epsilon: float, reward_summary: Optional[Dict] = None) -> None:
        """Print detailed training statistics for one episode.

        Args:
            episode: current episode index.
            agent_stats: metrics dict from the agent (loss, Q value, gradient
                norm, learning rate, buffer size, step count).
            epsilon: current exploration rate.
            reward_summary: optional reward-system summary; the penalty
                coefficient, reward/penalty and threat sections are printed
                only when it is provided and non-empty.
        """
        print(f"\n📊 Episode {episode} 智能训练统计:")

        self._print_network_performance(agent_stats, epsilon)
        self._print_buffer_status(agent_stats)

        if reward_summary:
            self._print_penalty_coefficient_info(reward_summary)
            self._print_reward_penalty_system(reward_summary)
            self._print_threat_detection_system()

    def _print_network_performance(self, agent_stats: Dict, epsilon: float) -> None:
        """Print network performance metrics (missing keys default to 0)."""
        print("   🧠 网络性能:")
        print(f"      平均Loss: {agent_stats.get('avg_loss', 0):.4f}")
        print(f"      平均Q值: {agent_stats.get('avg_q_value', 0):.3f}")
        print(f"      梯度范数: {agent_stats.get('avg_grad_norm', 0):.4f}")
        print(f"      学习率: {agent_stats.get('learning_rate', 0):.6f}")
        print(f"      Epsilon: {epsilon:.3f}")

    def _print_buffer_status(self, agent_stats: Dict) -> None:
        """Print replay-buffer status (missing keys default to 0)."""
        print("   📦 Buffer状态:")
        print(f"      Buffer大小: {agent_stats.get('buffer_size', 0)}")
        print(f"      训练步数: {agent_stats.get('steps_done', 0)}")

    def _print_penalty_coefficient_info(self, reward_summary: Dict) -> None:
        """Print the dynamic penalty coefficient and its latest change."""
        print("   🔮 动态惩罚系数:")
        print(f"      状态: {reward_summary.get('system_status', 'N/A')}")
        print(f"      当前系数: {reward_summary.get('current_coefficient', 0):.1f}")

        # Show the most recent coefficient change; history entries are
        # (step, coefficient) pairs, so index [1] is the coefficient value.
        coefficient_history = reward_summary.get('coefficient_history', [])
        if len(coefficient_history) >= 2:
            recent_change = coefficient_history[-1][1] - coefficient_history[-2][1]
            trend = "↗️" if recent_change > 0 else "↘️" if recent_change < 0 else "➡️"
            print(f"      最近变化: {recent_change:+.3f} {trend}")

    def _print_reward_penalty_system(self, reward_summary: Dict) -> None:
        """Print reward/penalty system info: overall totals and latest batch."""
        net_reward = reward_summary.get('net_bonus', 0)

        # Fix: guard against empty histories (e.g. before the first batch is
        # recorded) instead of raising IndexError on [-1]. History entries are
        # (step, value) pairs, so [1] is the value.
        bonus_history = self.reward_penalty_tracker.win_rate_bonus_history
        penalty_history = self.reward_penalty_tracker.batch_penalty_history
        last_bonus = bonus_history[-1][1] if bonus_history else 0.0
        last_penalty = penalty_history[-1][1] if penalty_history else 0.0
        batch_net_reward = last_bonus - last_penalty

        print("   💰 奖励罚系统:")
        print(f"      🎁 总体胜率奖励: +{reward_summary.get('total_batch_bonus', 0):.1f}")
        print(f"      🚨 总体惩罚: -{reward_summary.get('total_batch_penalties', 0):.1f}")
        print(f"      📊 总体净收益: {net_reward:+.1f}")
        print(f"      🎖 批次胜率奖励: +{last_bonus:.1f}")
        print(f"      🥊 批次惩罚: -{last_penalty:.1f}")
        print(f"      📊 批次净收益: {batch_net_reward:+.1f}")

    def _print_threat_detection_system(self) -> None:
        """Print threat detection system info (only when threats were seen)."""
        if self.threat_tracker.total_threats > 0:
            critical_rate = self.threat_tracker.get_critical_threat_rate()

            print("   🛡️ 威胁检测系统:")
            print(f"      威胁阻止总数: {self.threat_tracker.total_threats}")
            print(f"      关键威胁: {self.threat_tracker.critical_threats}次({critical_rate:.1%})")
            print(f"      防守奖励总计: {self.threat_tracker.total_defense_bonus_awarded:.2f}")
            print(f"      多重威胁防守: {self.threat_tracker.multiple_threat_defenses}次")

    # ------------------------------------------------------------------
    # Per-batch win/loss statistics
    # ------------------------------------------------------------------
    def print_win_fail_stats(self, episode: int, epsilon: float) -> None:
        """Print win/loss/draw statistics for the latest batch and overall.

        Also appends a (episode, total_wins, total_win_rate) sample to the
        tracker's win_rate_history as a side effect.
        """
        # A batch never spans more episodes than have actually been played.
        episodes_in_batch = min(self.config.training_stats_batch_freq, episode + 1)
        wins_in_last_batch = self.episode_game_tracker.get_wins_in_last_batch()
        # Fix: avoid ZeroDivisionError if the batch frequency is misconfigured to 0.
        win_rate_last_batch = wins_in_last_batch / episodes_in_batch if episodes_in_batch else 0.0

        total_win_rate = self.episode_game_tracker.get_total_win_rate()

        self.episode_game_tracker.win_rate_history.append((episode + 1, self.episode_game_tracker.total_wins, total_win_rate))

        print(f"\n📊 Episode {episode + 1}: 最近{episodes_in_batch}局胜{wins_in_last_batch}局 "
              f"(胜率: {win_rate_last_batch:.2%}), "
              f"总计: 胜{self.episode_game_tracker.total_wins} 负{self.episode_game_tracker.total_losses} 平{self.episode_game_tracker.total_draws},"
              f" 总胜率: {total_win_rate:.2%}, Epsilon: {epsilon:.3f}")

        self._print_threat_summary()

    def _print_threat_summary(self) -> None:
        """Print a one-line threat defense summary (only when threats were seen)."""
        if self.threat_tracker.total_threats > 0:
            critical_rate = self.threat_tracker.get_critical_threat_rate()
            print(f"   🛡️ 威胁防守: 总计{self.threat_tracker.total_threats}次, "
                  f"关键威胁{self.threat_tracker.critical_threats}次({critical_rate:.1%})")

    # ------------------------------------------------------------------
    # Final summary
    # ------------------------------------------------------------------
    def print_final_stats(self, total_training_time: float = 0) -> None:
        """Print the final training summary.

        Args:
            total_training_time: wall-clock training time in seconds; the
                hours line is printed only when positive.
        """
        total_games = self.episode_game_tracker.get_total_games()

        print("\n" + "=" * 80)
        print("🧠 智能训练统计汇总:")
        print(f"总游戏数: {total_games}")

        if total_games > 0:
            self._print_game_results_summary()

        if total_training_time > 0:
            print(f"总训练时间: {total_training_time / 3600:.2f} 小时")

        self._print_reward_penalty_summary()
        self._print_threat_detection_summary()

        print("=" * 80)

    def _print_game_results_summary(self) -> None:
        """Print win/loss/draw counts with their percentages."""
        total_win_rate = self.episode_game_tracker.get_total_win_rate() * 100
        total_loss_rate = self.episode_game_tracker.get_total_losses_rate() * 100
        total_draw_rate = self.episode_game_tracker.get_total_draws_rate() * 100

        print(f"胜利: {self.episode_game_tracker.total_wins} ({total_win_rate:.1f}%)")
        print(f"失败: {self.episode_game_tracker.total_losses} ({total_loss_rate:.1f}%)")
        print(f"平局: {self.episode_game_tracker.total_draws} ({total_draw_rate:.1f}%)")

    def print_reward_summary(self) -> None:
        """Print a short reward-system summary."""
        # NOTE(review): this uses tracker.get_net_reward() while
        # _print_reward_penalty_summary uses get_net_bonus() — confirm with the
        # RewardPenaltyTracker API whether both getters are intended to differ.
        print("\n💰 奖励系统总结:")
        print(f"🎁 胜率奖励总计: {self.reward_penalty_tracker.total_batch_bonus:.1f}")
        print(f"🧠 智能惩罚总计: {self.reward_penalty_tracker.total_batch_penalties:.1f}")
        print(f"📊 净收益: {self.reward_penalty_tracker.get_net_reward():.1f}")

    def _print_reward_penalty_summary(self) -> None:
        """Print the reward/penalty totals with a profit/loss/break-even label."""
        net_bonus = self.reward_penalty_tracker.get_net_bonus()
        status = '盈利' if net_bonus > 0 else '亏损' if net_bonus < 0 else '持平'

        print("\n💰 智能奖励惩罚汇总:")
        print(f"🎁 胜率奖励总计: {self.reward_penalty_tracker.total_batch_bonus:.1f}")
        print(f"🧠 智能惩罚总计: {self.reward_penalty_tracker.total_batch_penalties:.1f}")
        print(f"📊 最终净收益: {net_bonus:+.1f} ({status})")

    def _print_threat_detection_summary(self) -> None:
        """Print the threat-detection totals (only when threats were seen)."""
        if self.threat_tracker.total_threats > 0:
            print("\n🛡️ 威胁检测系统汇总:")
            print(f"   总威胁阻止: {self.threat_tracker.total_threats}")
            print(f"   关键威胁阻止: {self.threat_tracker.critical_threats}")
            print(f"   高威胁阻止: {self.threat_tracker.high_threats}")
            print(f"   中威胁阻止: {self.threat_tracker.medium_threats}")
            print(f"   低威胁阻止: {self.threat_tracker.low_threats}")
            print(f"   多重威胁防守: {self.threat_tracker.multiple_threat_defenses}")
            print(f"   防守奖励总计: {self.threat_tracker.total_defense_bonus_awarded:.2f}")

    # ------------------------------------------------------------------
    # Run-start / checkpoint banners
    # ------------------------------------------------------------------
    def print_training_info(self, total_episodes: int, current_episode: int,
                            agent=None, training_start_time=None) -> None:
        """Print the training configuration banner.

        Args:
            total_episodes: target number of episodes.
            current_episode: episode index the run starts from.
            agent: optional agent; its expert_data_loader is inspected for the
                expert-game count when expert data is enabled.
            training_start_time: optional datetime; printed when provided.
        """
        print(f"🔥 开始智能自对弈训练 (棋盘大小: {self.config.board_size}x{self.config.board_size})")
        print(f"🎯 目标episodes: {total_episodes}")
        print(f"📊 当前episode: {current_episode}")
        print(f"⏳ 剩余episodes: {total_episodes - current_episode}")
        print(f"🧠 网络配置: {self.config.network_config}")
        print(f"🎁 MCTS采样: {'启用' if self.config.use_mcts_sampling else '禁用'}")
        print(f"🔄 优先级回放: {'启用' if self.config.use_prioritized_replay else '禁用'}")
        if self.config.use_expert_data and agent:
            # Count expert games only when the agent actually has a loader.
            has_loader = hasattr(agent, 'expert_data_loader') and agent.expert_data_loader
            expert_games_count = len(agent.expert_data_loader.expert_games) if has_loader else 0
            print(f"📚 专家棋谱: 启用 ({expert_games_count} 局, 比例: {self.config.expert_data_ratio:.2f})")
        else:
            print("📚 专家棋谱: 禁用")

        if training_start_time:
            print(f"⏰ 训练开始时间: {training_start_time.strftime('%Y-%m-%d %H:%M:%S')}")

    def print_checkpoint_info(self, checkpoint_filepath: str) -> None:
        """Print checkpoint-restore information and the restored statistics.

        Args:
            checkpoint_filepath: path to the checkpoint file (.pkl).
        """
        checkpoint_name = os.path.basename(checkpoint_filepath).replace('.pkl', '')

        print(f"从检查点恢复训练: {checkpoint_filepath}")
        print(f"✅ 成功加载检查点: {checkpoint_name}")
        # NOTE(review): current_episode / best_win_rate / best_episode are not
        # set in __init__ — they must be assigned on this printer by the
        # checkpoint-restore code before this method is called; otherwise the
        # two lines below raise AttributeError. Verify against the callers.
        print(f"📊 恢复到episode: {self.current_episode}")
        print(f"🏆 最佳胜率: {self.best_win_rate:.3f} (episode {self.best_episode})")

        # Restored tracker statistics.
        net_bonus = self.reward_penalty_tracker.get_net_bonus()
        total_games = self.episode_game_tracker.get_total_games()
        win_rate = self.episode_game_tracker.get_win_rate()
        total_threats = self.threat_tracker.total_threats

        print(f"🎮 总游戏数: {total_games}")
        print(f"📈 当前胜率: {win_rate:.3f}")
        print(f"💰 奖励系统净收益: {net_bonus:+.1f}")
        if total_threats > 0:
            print(f"🛡️ 威胁阻止: {total_threats}次")
