import torch
import random
import numpy as np
import os
from tqdm import tqdm
from collections import deque
import matplotlib.pyplot as plt

# 导入项目中的DQN相关组件
from rainforeLearn.gomoku.v1.dqn.configs.config import DQNConfig
from rainforeLearn.gomoku.v1.dqn.models.q_network import QNetwork, DuelingQNetwork, ConvolutionalDuelingQNetwork
from rainforeLearn.gomoku.v1.dqn.agents.dqn_agent import DQNAgent
from rainforeLearn.gomoku.v1.dqn.utils.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer, AdvancedPrioritizedReplayBuffer, NStepReplayBuffer
from rainforeLearn.gomoku.v1.dqn.utils.logger import Logger
from rainforeLearn.gomoku.v1.gomoku_env_wrapper import GomokuEnvWrapper
from rainforeLearn.gomoku.v1.gomoku_env import GomokuEnv


class GomokuDQNConfig(DQNConfig):
    """Gomoku-specific DQN configuration, extending the generic DQNConfig.

    Adds reward shaping, threat-detection bonuses, win-rate milestone
    rewards, and a batch win-rate penalty system on top of the base
    hyper-parameters. All setters return self for fluent chaining.
    """
    def __init__(self):
        super().__init__()

        # Gomoku-specific reward defaults.
        self.intermediate_reward_scale = 0.25  # scale factor for shaped intermediate rewards
        self.win_reward = 20.0  # reward for winning a game
        self.lose_penalty = -10.0  # penalty for losing a game
        self.invalid_move_penalty = -20.0  # penalty for an illegal move

        # Threat-detection reward configuration: bonus multipliers granted
        # when the agent blocks an opponent threat.
        self.threat_detection_config = {
            'critical_defense_bonus': 1.0,      # extra multiplier for blocking an immediate win
            'high_defense_bonus': 0.8,          # extra multiplier for blocking a live four
            'medium_defense_bonus': 0.6,        # extra multiplier for blocking a live three
            'multiple_threat_bonus': 0.4,       # extra bonus for defending multiple threats at once
            'enable_detailed_logging': False     # emit detailed threat-detection logs?
        }

        # Gomoku-specific exploration schedule (applied by get_gomoku_config).
        self.gomoku_epsilon_start = 0.5
        self.gomoku_epsilon_end = 0.05
        self.gomoku_epsilon_decay = 20000

        # Win-rate milestone reward system.
        self.enable_win_rate_rewards = True
        self.win_rate_min_games_for_recent = 20
        self.win_rate_min_games_for_total = 50

        # Bonus table for the recent-100-games win rate (rate -> bonus).
        self.win_rate_rewards_100 = {
            0.50: 100, 0.60: 150, 0.70: 200, 0.80: 250,
            0.90: 300, 0.95: 350, 0.98: 400
        }

        # Bonus table for the overall win rate (rate -> bonus).
        self.win_rate_rewards_total = {
            0.50: 200, 0.60: 300, 0.70: 400, 0.80: 500,
            0.90: 600, 0.95: 900, 0.98: 1200
        }

        # Batch win-rate penalty system.
        self.enable_batch_penalty = True
        # NOTE(review): this shares its name with the DQN training batch_size
        # and is overwritten to 64 in get_gomoku_config(); here it appears to
        # mean games per win-rate batch — confirm which meaning consumers use.
        self.batch_size = 100
        self.batch_adaptation_period = 20
        self.batch_compare_interval = 2
        self.batch_penalty_coefficient = 15
        self.batch_max_penalty = 200
        self.batch_min_penalty_threshold = 0.01

        # Advanced batch-penalty behaviour.
        self.batch_penalty_decay = True
        self.batch_penalty_decay_factor = 1.2
        self.batch_reward_recovery = True
        self.batch_recovery_reward_factor = 0.5

        # Network architecture selection.
        self.use_conv_network = True  # convolutional network (suits board games)
        self.reward_shaping = True

        # Training monitoring.
        self.print_batch_analysis_freq = 5


    def get_gomoku_config(self, board_size=15):
        """Apply the Gomoku-specific overrides and return self (fluent)."""
        # Switch the epsilon schedule to the Gomoku-specific values.
        self.epsilon_start = self.gomoku_epsilon_start
        self.epsilon_end = self.gomoku_epsilon_end
        self.epsilon_decay = self.gomoku_epsilon_decay

        # Network configuration for Gomoku.
        self.hidden_dim = 512
        self.use_residual_connections = True
        self.dropout_rate = 0.15

        # Training configuration for Gomoku.
        self.batch_size = 64
        self.learning_rate = 5e-4
        self.target_update_freq = 200
        self.soft_target_update = True
        self.target_update_tau = 0.01

        # Replay-buffer sizing.
        self.min_buffer_size = 500
        self.buffer_size = 50000

        # Learning-rate schedule.
        self.lr_schedule = 'step'
        self.lr_step_size = 5000
        self.lr_gamma = 0.7

        # Advanced features.
        self.adaptive_epsilon = True
        self.reward_shaping = True
        self.advanced_per = True
        self.per_priority_strategy = 'td_error'
        self.use_noisy_networks = False

        return self

    def set_threat_detection_config(self, **kwargs):
        """Update known threat-detection keys; warn about unknown ones. Fluent."""
        for key, value in kwargs.items():
            if key in self.threat_detection_config:
                self.threat_detection_config[key] = value
                print(f"🛡️ 威胁检测配置更新: {key} = {value}")
            else:
                print(f"⚠️ 未知的威胁检测配置项: {key}")
        return self

    def set_batch_penalty_config(self, **kwargs):
        """Update batch-penalty settings via short aliases or batch_* names. Fluent."""
        # Short alias -> full attribute name.
        config_map = {
            'coefficient': 'batch_penalty_coefficient',
            'max_penalty': 'batch_max_penalty',
            'min_threshold': 'batch_min_penalty_threshold',
            'adaptation_period': 'batch_adaptation_period',
            'compare_interval': 'batch_compare_interval',
            'decay_factor': 'batch_penalty_decay_factor',
            'recovery_factor': 'batch_recovery_reward_factor'
        }

        for key, value in kwargs.items():
            if key in config_map:
                setattr(self, config_map[key], value)
            elif hasattr(self, f'batch_{key}'):
                # Fall back to a direct batch_-prefixed attribute.
                setattr(self, f'batch_{key}', value)
            else:
                print(f"警告: 批次惩罚配置项 '{key}' 不存在")

        return self

    def set_win_rate_rewards(self, recent_100_rewards=None, total_rewards=None):
        """Replace either win-rate bonus table (rate -> bonus). Fluent."""
        if recent_100_rewards:
            self.win_rate_rewards_100 = recent_100_rewards
        if total_rewards:
            self.win_rate_rewards_total = total_rewards
        return self

    # Preset configurations.
    def set_conservative_batch_penalty(self):
        """Conservative batch-penalty preset (mild coefficient, higher trigger threshold)."""
        self.batch_penalty_coefficient = 10
        self.batch_max_penalty = 150
        self.batch_min_penalty_threshold = 0.02
        self.batch_adaptation_period = 25
        return self

    def set_moderate_batch_penalty(self):
        """Moderate batch-penalty preset (matches the constructor defaults)."""
        self.batch_penalty_coefficient = 15
        self.batch_max_penalty = 200
        self.batch_min_penalty_threshold = 0.01
        self.batch_adaptation_period = 20
        return self

    def set_aggressive_batch_penalty(self):
        """Aggressive batch-penalty preset (strong coefficient, low trigger threshold)."""
        self.batch_penalty_coefficient = 20
        self.batch_max_penalty = 250
        self.batch_min_penalty_threshold = 0.005
        self.batch_adaptation_period = 15
        return self

    # Threat-detection presets.
    def set_conservative_threat_detection(self):
        """Conservative threat-detection preset (lowest bonus multipliers)."""
        self.threat_detection_config.update({
            'critical_defense_bonus': 1.5,
            'high_defense_bonus': 1.2,
            'medium_defense_bonus': 1.0,
            'multiple_threat_bonus': 0.5,
            'enable_detailed_logging': False
        })
        return self

    def set_moderate_threat_detection(self):
        """Moderate threat-detection preset."""
        self.threat_detection_config.update({
            'critical_defense_bonus': 2.0,
            'high_defense_bonus': 1.5,
            'medium_defense_bonus': 1.2,
            'multiple_threat_bonus': 0.8,
            'enable_detailed_logging': False
        })
        return self

    def set_aggressive_threat_detection(self):
        """Aggressive threat-detection preset (highest multipliers, detailed logging on)."""
        self.threat_detection_config.update({
            'critical_defense_bonus': 2.5,
            'high_defense_bonus': 2.0,
            'medium_defense_bonus': 1.5,
            'multiple_threat_bonus': 1.0,
            'enable_detailed_logging': True
        })
        return self


class GomokuTrainer:
    """智能五子棋DQN训练器 - 支持断点续训跳过适应期和动态惩罚调整 + 增强威胁检测系统"""

    def __init__(self, board_size=15):
        """Build the full training stack for a board_size x board_size game.

        Constructs config, environment, agent, replay buffer, reward wrapper,
        logger, and all bookkeeping state. Order matters: the config's
        state/action dims come from the env and must be set before the
        agent is built.
        """
        self.config = GomokuDQNConfig()
        self.config = self.config.get_gomoku_config(board_size)

        # Environment-related parameters.
        self.config.env_name = f'Gomoku-{board_size}x{board_size}'
        self.config.board_size = board_size

        self.env = GomokuEnv(board_size)
        self.config.state_dim = self.env.observation_space_shape[0]
        self.config.action_dim = self.env.action_space_n

        self.set_seed(self.config.seed)

        # Network architecture and agent.
        self._initialize_network_and_agent()

        # Experience replay buffer.
        self._initialize_replay_buffer()

        # Smart env wrapper: resume support, dynamic penalties, threat detection.
        self._initialize_env_wrapper()

        self.logger = Logger(self.config.log_interval)

        # Training parameters.
        self.save_interval = 10000
        self.start_episode = 0
        self.resume_from_checkpoint = False
        self.epsilon_offset = 0  # episode offset used by the 'reset' epsilon strategy
        self.original_epsilon_params = None
        self.new_epsilon_params = None
        self.epsilon_strategy = 'reset'

        # Resume-from-checkpoint support.
        self.is_resumed_training = False
        self.resumed_batch_count = 0

        # Adaptive-epsilon state (only allocated when the feature is on).
        if self.config.adaptive_epsilon:
            self.recent_win_rates = deque(maxlen=self.config.epsilon_adapt_window)
            self.epsilon_boost_active = False
            self.epsilon_boost_counter = 0

        # Per-episode training statistics.
        self.invalid_moves_per_episode = []
        self.win_rate_history = []
        self.episode_invalid_moves = 0
        self.episode_100_stats = []

        # Cumulative game outcomes.
        self.total_wins = 0
        self.total_losses = 0
        self.total_draws = 0

        # Reward/penalty accounting.
        self.total_win_rate_bonus = 0
        self.win_rate_bonus_history = []
        self.total_batch_penalties = 0
        self.batch_penalty_history = []
        self.last_batch_analysis_episode = 0

        # Threat-detection statistics.
        self.threat_defense_stats = {
            'total_threats_blocked': 0,
            'critical_threats_blocked': 0,
            'high_threats_blocked': 0,
            'medium_threats_blocked': 0,
            'low_threats_blocked': 0,
            'multiple_threat_defenses': 0,
            'total_defense_bonus_awarded': 0,
            'threat_episodes': []  # records of episodes in which threats occurred
        }

        # Smart penalty-system statistics.
        self.penalty_effectiveness_history = []
        self.coefficient_change_history = []
        self.smart_adjustments_count = 0

        # Advanced analysis / charting.
        self.enable_advanced_analysis = True
        self.analysis_save_interval = 5000

    def _initialize_network_and_agent(self):
        """Build the Q-network factory for the configured architecture and create the agent."""
        if self.config.use_conv_network:
            # Convolutional dueling network operating on the 2-D board layout;
            # the state_dim argument is ignored by this architecture.
            def build_network(state_dim, action_dim, hidden_dim):
                return ConvolutionalDuelingQNetwork(
                    self.config.board_size, action_dim, hidden_dim,
                    self.config.dropout_rate, self.config.use_noisy_networks)
        else:
            network_class = DuelingQNetwork if self.config.dueling_dqn else QNetwork

            def build_network(state_dim, action_dim, hidden_dim):
                return network_class(state_dim, action_dim, hidden_dim,
                                     self.config.use_residual_connections,
                                     self.config.dropout_rate,
                                     self.config.use_noisy_networks)

        self.agent = DQNAgent(self.config, build_network)

    def _initialize_replay_buffer(self):
        """Create the experience replay buffer matching the configured variant."""
        cfg = self.config
        if cfg.prioritized_replay and cfg.advanced_per:
            # Prioritized replay with a pluggable priority strategy.
            buffer = AdvancedPrioritizedReplayBuffer(
                cfg.buffer_size, cfg.per_alpha, cfg.per_priority_strategy)
        elif cfg.prioritized_replay:
            buffer = PrioritizedReplayBuffer(cfg.buffer_size, cfg.per_alpha)
        elif cfg.use_n_step_returns:
            # Multi-step returns spread credit over n transitions.
            buffer = NStepReplayBuffer(cfg.buffer_size, cfg.n_step, cfg.gamma)
        else:
            buffer = ReplayBuffer(cfg.buffer_size)
        self.replay_buffer = buffer

    def _initialize_env_wrapper(self):
        """Create the reward-shaping env wrapper, or set it to None when shaping is off.

        Also configures the batch-penalty system and the threat-detection
        bonus system on the wrapper from the current config.
        """
        if self.config.reward_shaping:
            # Reward parameters handed to the wrapper.
            reward_config = {
                'win_reward': self.config.win_reward,
                'lose_penalty': self.config.lose_penalty,
                'invalid_penalty': self.config.invalid_move_penalty,
                'intermediate_scale': self.config.intermediate_reward_scale,
                # Threat-detection bonus configuration.
                'threat_detection': self.config.threat_detection_config
            }

            # Smart environment wrapper providing shaped rewards.
            self.env_wrapper = GomokuEnvWrapper(self.env, reward_config)

            # Configure the batch win-rate penalty system.
            if self.config.enable_batch_penalty:
                self.env_wrapper.set_penalty_config(
                    coefficient=self.config.batch_penalty_coefficient,
                    max_penalty=self.config.batch_max_penalty,
                    min_threshold=self.config.batch_min_penalty_threshold,
                    adaptation_period=self.config.batch_adaptation_period
                )

                print(f"🧠 智能惩罚系统已配置:")
                print(f"   惩罚系数: {self.config.batch_penalty_coefficient}")
                print(f"   最大惩罚: {self.config.batch_max_penalty}")
                print(f"   触发阈值: {self.config.batch_min_penalty_threshold:.1%}")
                print(f"   适应期: {self.config.batch_adaptation_period}批次")

            # Configure the threat-detection system on the wrapper.
            self.env_wrapper.set_threat_detection_config(**self.config.threat_detection_config)
            print(f"🛡️ 威胁检测系统已配置:")
            print(f"   关键威胁奖励倍数: {self.config.threat_detection_config['critical_defense_bonus']}")
            print(f"   高威胁奖励倍数: {self.config.threat_detection_config['high_defense_bonus']}")
            print(f"   中威胁奖励倍数: {self.config.threat_detection_config['medium_defense_bonus']}")
            print(f"   详细日志: {'启用' if self.config.threat_detection_config['enable_detailed_logging'] else '禁用'}")
        else:
            # No reward shaping: callers step the raw env directly.
            self.env_wrapper = None

    def set_seed(self, seed):
        """设置随机种子"""
        torch.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False

    def _setup_resumed_training_mode(self, completed_batches=0):
        """Activate resume mode so the penalty system skips its adaptation period.

        Returns True when resume mode was activated, False when there is no
        wrapper or batch penalties are disabled.
        """
        if not (self.env_wrapper and self.config.enable_batch_penalty):
            return False

        self.is_resumed_training = True
        self.resumed_batch_count = completed_batches

        # Tell the wrapper how many batches are already done so penalties
        # apply immediately instead of waiting out the adaptation period.
        self.env_wrapper.set_resume_training_mode(
            is_resumed=True,
            completed_batches=completed_batches
        )

        print(f"🔄 断点续训模式已激活:")
        print(f"   已完成批次: {completed_batches}")
        print(f"   惩罚系统: 立即生效 (跳过适应期)")

        return True

    def _monitor_penalty_system_performance(self):
        """Sample the penalty system's stats and track new coefficient adjustments."""
        if not self.env_wrapper:
            return

        summary = self.env_wrapper.get_penalty_summary()

        # Snapshot the current effectiveness for later trend analysis.
        self.penalty_effectiveness_history.append({
            'episode': self.logger.episode_count,
            'effectiveness': summary['avg_effectiveness'],
            'coefficient': summary['current_coefficient'],
            'consecutive_poor': summary['consecutive_poor_batches']
        })

        # Record each coefficient adjustment exactly once.
        adjustments = summary['recent_adjustments']
        if adjustments:
            newest = adjustments[-1]
            if newest not in self.coefficient_change_history:
                self.coefficient_change_history.append(newest)
                self.smart_adjustments_count += 1

    def _record_threat_defense_episode(self, episode, info):
        """Append a record of any threats seen this episode, keeping a bounded history."""
        if not info.get('threats_detected'):
            return

        record = {
            'episode': episode,
            'threats_detected': info['threats_detected'],
            'defense_reward': info.get('defense_reward', 0),
            'multiple_threats': info.get('multiple_threats', 0),
            'winner': info.get('winner', 0)
        }
        episodes = self.threat_defense_stats['threat_episodes']
        episodes.append(record)

        # Cap the history at the 1000 most recent threat episodes.
        if len(episodes) > 1000:
            episodes.pop(0)

    def select_action_with_masking(self, state, epsilon):
        """智能动作选择 - 总是从合法动作中选择"""
        legal_actions = self.env.get_legal_actions()

        if not legal_actions:
            return random.randint(0, self.config.action_dim - 1)

        if random.random() < epsilon:
            return random.choice(legal_actions)
        else:
            with torch.no_grad():
                state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.agent.device)
                q_values = self.agent.policy_net(state_tensor).squeeze(0)

            masked_q_values = q_values.clone()
            all_actions = set(range(self.config.action_dim))
            illegal_actions = all_actions - set(legal_actions)

            for action in illegal_actions:
                masked_q_values[action] = float('-inf')

            return masked_q_values.argmax().item()

    def select_action_noisy(self, state):
        """NoisyNet动作选择"""
        self.agent.policy_net.reset_noise()

        legal_actions = self.env.get_legal_actions()
        if not legal_actions:
            return random.randint(0, self.config.action_dim - 1)

        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.agent.device)
            q_values = self.agent.policy_net(state_tensor).squeeze(0)

        masked_q_values = q_values.clone()
        illegal_actions = set(range(self.config.action_dim)) - set(legal_actions)
        for action in illegal_actions:
            masked_q_values[action] = float('-inf')

        return masked_q_values.argmax().item()

    def compute_epsilon_with_strategy(self, episode, strategy='reset'):
        """Linearly decay epsilon from epsilon_start down to epsilon_end.

        'continue' decays against the absolute episode number; any other
        strategy ('reset') decays against the episode count measured from
        ``self.epsilon_offset`` (restarting exploration after a resume).
        """
        cfg = self.config
        if strategy == 'continue':
            effective_episode = episode
        else:  # 'reset': measure decay progress from the stored offset
            effective_episode = max(0, episode - self.epsilon_offset)

        progress = min(effective_episode / cfg.epsilon_decay, 1.0)
        epsilon = cfg.epsilon_start - (cfg.epsilon_start - cfg.epsilon_end) * progress
        return max(epsilon, cfg.epsilon_end)

    def _update_adaptive_epsilon(self, episode):
        """自适应epsilon调整"""
        if len(self.recent_win_rates) >= self.config.epsilon_adapt_window:
            recent_improvement = max(self.recent_win_rates) - min(self.recent_win_rates)

            if recent_improvement < self.config.epsilon_adapt_threshold:
                if not self.epsilon_boost_active:
                    original_epsilon = self.compute_epsilon_with_strategy(episode, self.epsilon_strategy)
                    boosted_epsilon = min(original_epsilon * self.config.epsilon_boost_factor, 0.3)
                    self.epsilon_boost_active = True
                    self.epsilon_boost_counter = 200
                    print(f"🔍 Episode {episode}: 胜率停滞，提高探索率 {original_epsilon:.3f} → {boosted_epsilon:.3f}")

        if self.epsilon_boost_active:
            self.epsilon_boost_counter -= 1
            if self.epsilon_boost_counter <= 0:
                self.epsilon_boost_active = False
                print(f"🎯 Episode {episode}: 探索提升结束，恢复正常衰减")

    def _print_training_stats(self, episode):
        """Print the periodic training report: network metrics, reward/penalty
        accounting, smart penalty-system status, and threat-detection stats."""
        agent_stats = self.agent.get_training_stats()

        # Network performance metrics from the agent.
        print(f"\n📊 Episode {episode} 智能训练统计:")
        print(f"   🧠 网络性能:")
        print(f"      平均Loss: {agent_stats.get('avg_loss', 0):.4f}")
        print(f"      平均Q值: {agent_stats.get('avg_q_value', 0):.3f}")
        print(f"      梯度范数: {agent_stats.get('avg_grad_norm', 0):.4f}")
        print(f"      学习率: {agent_stats.get('learning_rate', 0):.6f}")

        # Reward/penalty accounting (only meaningful with the wrapper).
        if self.env_wrapper:
            net_reward = self.total_win_rate_bonus - self.total_batch_penalties
            print(f"   💰 奖励系统:")
            print(f"      🎁 胜率奖励: +{self.total_win_rate_bonus:.1f}")
            print(f"      🚨 批次惩罚: -{self.total_batch_penalties:.1f}")
            print(f"      📊 净收益: {net_reward:+.1f}")

            # Smart penalty-system status pulled from the wrapper.
            penalty_summary = self.env_wrapper.get_penalty_summary()
            print(f"   🧠 智能惩罚系统:")
            print(f"      状态: {penalty_summary['system_status']}")
            print(f"      当前系数: {penalty_summary['current_coefficient']:.1f}")
            if penalty_summary['avg_effectiveness'] > 0:
                print(f"      平均效果: {penalty_summary['avg_effectiveness']:.1%}")
            print(f"      智能调整: {penalty_summary['coefficient_adjustments']}次")

            # Threat-detection stats, shown only once threats have been blocked.
            threat_summary = penalty_summary.get('threat_defense_summary', {})
            if threat_summary.get('total_threats_blocked', 0) > 0:
                print(f"   🛡️ 威胁检测系统:")
                print(f"      威胁阻止总数: {threat_summary['total_threats_blocked']}")
                print(f"      关键威胁防守率: {threat_summary['critical_defense_rate']:.1%}")
                print(f"      防守奖励总计: {threat_summary['total_defense_bonus']:.2f}")
                print(f"      多重威胁防守: {threat_summary['multiple_threat_defenses']}次")

    def _restore_env_wrapper_state(self, env_stats):
        """Restore the env wrapper's saved statistics when resuming training.

        Best-effort: any failure is logged and training continues in normal
        mode rather than aborting.
        """
        if not self.env_wrapper or not env_stats:
            return

        try:
            # Restore the basic game counters.
            self.env_wrapper.game_count = env_stats.get('games_played', 0)
            self.env_wrapper.win_count = env_stats.get('total_wins', 0)

            # Restore per-batch win-rate data and activate resume mode so the
            # penalty system skips its adaptation period.
            if 'batch_win_rates' in env_stats:
                self.env_wrapper.batch_win_rates = env_stats['batch_win_rates']
                completed_batches = len(self.env_wrapper.batch_win_rates)
                print(f"✅ 已恢复 {completed_batches} 个批次的胜率数据")

                # Switch to resumed-training mode.
                self._setup_resumed_training_mode(completed_batches)

            # Restore the penalty history, validating its shape first.
            if hasattr(self.env_wrapper, 'penalty_history') and 'penalty_history' in env_stats:
                penalty_history = env_stats.get('penalty_history', [])
                # Only accept a list of (episode, amount)-style records.
                if penalty_history and isinstance(penalty_history[0], (list, tuple)):
                    self.env_wrapper.penalty_history = penalty_history
                    print(f"✅ 已恢复 {len(penalty_history)} 条惩罚记录")

            # Restore threat-detection statistics.
            if hasattr(self.env_wrapper, 'threat_defense_stats'):
                saved_threat_stats = env_stats.get('threat_defense_stats', {})
                if saved_threat_stats:
                    self.env_wrapper.threat_defense_stats.update(saved_threat_stats)
                    print(f"✅ 已恢复威胁检测统计: {saved_threat_stats.get('total_threats_blocked', 0)} 个威胁记录")

            # Restore win-rate milestone markers.
            if 'last_100_milestone' in env_stats:
                self.env_wrapper.last_100_win_rate_milestone = env_stats['last_100_milestone']
            if 'last_total_milestone' in env_stats:
                self.env_wrapper.last_total_win_rate_milestone = env_stats['last_total_milestone']

            print(f"🔄 环境包装器状态恢复完成")

        except Exception as e:
            print(f"⚠️ 恢复环境包装器状态时出错: {e}")
            print(f"   将以正常模式继续训练")

    def _print_final_stats(self):
        """打印最终智能统计"""
        total_games = self.total_wins + self.total_losses + self.total_draws
        print("\n" + "=" * 80)
        print("🧠 智能训练统计汇总:")
        print(f"总游戏数: {total_games}")
        print(f"胜利: {self.total_wins} ({self.total_wins / total_games * 100:.1f}%)" if total_games > 0 else "胜利: 0")
        print(f"失败: {self.total_losses} ({self.total_losses / total_games * 100:.1f}%)" if total_games > 0 else "失败: 0")
        print(f"平局: {self.total_draws} ({self.total_draws / total_games * 100:.1f}%)" if total_games > 0 else "平局: 0")

        # 智能奖励惩罚汇总
        net_bonus = self.total_win_rate_bonus - self.total_batch_penalties
        print(f"\n💰 智能奖励惩罚汇总:")
        print(f"🎁 胜率奖励总计: {self.total_win_rate_bonus:.1f}")
        print(f"🧠 智能惩罚总计: {self.total_batch_penalties:.1f}")
        print(f"📊 最终净收益: {net_bonus:+.1f} ({'盈利' if net_bonus > 0 else '亏损' if net_bonus < 0 else '持平'})")

        # 智能系统效率分析
        if self.env_wrapper:
            penalty_summary = self.env_wrapper.get_penalty_summary()
            print(f"\n🧠 智能系统效率分析:")
            print(f"   系统状态: {penalty_summary['system_status']}")
            print(f"   最终惩罚系数: {penalty_summary['current_coefficient']:.1f}")
            print(f"   智能调整次数: {penalty_summary['coefficient_adjustments']}")
            if penalty_summary['avg_effectiveness'] > 0:
                print(f"   平均惩罚效果: {penalty_summary['avg_effectiveness']:.1%}")

            # 断点续训信息
            if self.is_resumed_training:
                print(f"   断点续训: 从第{self.resumed_batch_count}批次继续")
                print(f"   适应期跳过: ✅ 生效")

        print("=" * 80)

    def plot_comprehensive_analysis(self, episode):
        """🔥 Render the 2x3 comprehensive training-analysis figure and save it.

        Charts: batch win-rate trend, penalty distribution, recent-batch
        comparison, penalty-effectiveness tracking, cumulative reward/penalty
        curves, and the penalty-coefficient adjustment history. The figure is
        written to analysis_charts/ and closed (never shown) so training does
        not block.
        """
        # Bug fix: also bail out when reward shaping is disabled and no
        # env_wrapper exists (the old code dereferenced it unconditionally).
        if (not self.enable_advanced_analysis or self.env_wrapper is None
                or len(self.env_wrapper.batch_win_rates) < 3):
            return

        fig, axes = plt.subplots(2, 3, figsize=(18, 12))
        fig.suptitle(f'五子棋训练综合分析 (Episode {episode})', fontsize=16, fontweight='bold')

        # 1. Batch win-rate trend.
        batch_nums = range(1, len(self.env_wrapper.batch_win_rates) + 1)
        axes[0,0].plot(batch_nums, self.env_wrapper.batch_win_rates, 'b-', linewidth=2, marker='o')
        axes[0,0].set_title('批次胜率趋势')
        axes[0,0].set_xlabel('批次')
        axes[0,0].set_ylabel('胜率')
        axes[0,0].grid(True, alpha=0.3)
        axes[0,0].axhline(y=0.5, color='r', linestyle='--', alpha=0.5, label='50%基准线')
        axes[0,0].legend()

        # 2. Penalty-amount distribution.
        if self.env_wrapper.penalty_history:
            penalty_amounts = [p[1] for p in self.env_wrapper.penalty_history]
            axes[0,1].hist(penalty_amounts, bins=min(10, len(penalty_amounts)), alpha=0.7, color='red', edgecolor='black')
            axes[0,1].set_title('惩罚金额分布')
            axes[0,1].set_xlabel('惩罚金额')
            axes[0,1].set_ylabel('频次')
        else:
            axes[0,1].text(0.5, 0.5, '暂无惩罚数据', ha='center', va='center', transform=axes[0,1].transAxes)
            axes[0,1].set_title('惩罚金额分布')

        # 3. Last five batches; a bar is red when its rate dropped vs the previous one.
        if len(self.env_wrapper.batch_win_rates) >= 5:
            recent_rates = self.env_wrapper.batch_win_rates[-5:]
            colors = ['red' if i > 0 and recent_rates[i] < recent_rates[i-1] else 'green' for i in range(5)]
            bars = axes[0,2].bar(range(1, 6), recent_rates, alpha=0.7, color=colors, edgecolor='black')
            axes[0,2].set_title('最近5批次胜率对比')
            axes[0,2].set_xlabel('批次 (倒数第n个)')
            axes[0,2].set_ylabel('胜率')

            # Annotate each bar with its win rate.
            for bar, rate in zip(bars, recent_rates):
                axes[0,2].text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01,
                               f'{rate:.1%}', ha='center', va='bottom')

        # 4. Penalty-effectiveness tracking.
        if len(self.env_wrapper.penalty_effectiveness_tracker) > 0:
            axes[1,0].plot(range(1, len(self.env_wrapper.penalty_effectiveness_tracker) + 1),
                           self.env_wrapper.penalty_effectiveness_tracker, 'o-', color='orange', linewidth=2)
            axes[1,0].set_title('惩罚效果追踪')
            axes[1,0].set_xlabel('评估周期')
            axes[1,0].set_ylabel('效果评分')
            axes[1,0].set_ylim(0, 1)
            axes[1,0].axhline(y=0.5, color='r', linestyle='--', alpha=0.5, label='中等效果线')
            axes[1,0].grid(True, alpha=0.3)
            axes[1,0].legend()

        # 5. Cumulative reward/penalty curves.
        # Bug fix: merge bonus and penalty events into one episode-sorted
        # timeline. The old two-pass build appended to the penalty/net lists
        # unconditionally but to the episode/reward lists only for new
        # episodes, so the x/y arrays diverged in length whenever a penalty
        # landed on an episode that already had a bonus, and the plot raised.
        events = [(ep, reward, 0.0) for ep, reward in self.win_rate_bonus_history]
        events += [(ep, 0.0, penalty) for ep, penalty in self.batch_penalty_history]
        events.sort(key=lambda e: e[0])

        episodes_list = []
        cumulative_rewards = []
        cumulative_penalties = []
        cumulative_net = []
        cum_reward = 0.0
        cum_penalty = 0.0
        for ep, reward, penalty in events:
            cum_reward += reward
            cum_penalty += penalty
            episodes_list.append(ep)
            cumulative_rewards.append(cum_reward)
            cumulative_penalties.append(cum_penalty)
            cumulative_net.append(cum_reward - cum_penalty)

        if episodes_list:
            axes[1,1].plot(episodes_list, cumulative_rewards, 'g-', label='累计奖励', linewidth=2)
            axes[1,1].plot(episodes_list, cumulative_penalties, 'r-', label='累计惩罚', linewidth=2)
            axes[1,1].plot(episodes_list, cumulative_net, 'b-', label='净收益', linewidth=2)
            axes[1,1].set_title('奖励惩罚累计变化')
            axes[1,1].set_xlabel('Episode')
            axes[1,1].set_ylabel('累计分数')
            axes[1,1].legend()
            axes[1,1].grid(True, alpha=0.3)

        # 6. Penalty-coefficient adjustment history.
        if self.env_wrapper.penalty_adjustment_history:
            adj_batches = [adj['batch_num'] for adj in self.env_wrapper.penalty_adjustment_history]
            adj_coefficients = [adj['new_coefficient'] for adj in self.env_wrapper.penalty_adjustment_history]
            axes[1,2].plot(adj_batches, adj_coefficients, 'mo-', linewidth=2)
            axes[1,2].set_title('惩罚系数动态调整')
            axes[1,2].set_xlabel('批次号')  # x-axis is the batch number, not the episode
            axes[1,2].set_ylabel('惩罚系数')
            axes[1,2].grid(True, alpha=0.3)
        else:
            axes[1,2].text(0.5, 0.5, '暂无系数调整', ha='center', va='center', transform=axes[1,2].transAxes)
            axes[1,2].set_title('惩罚系数动态调整')

        plt.tight_layout()

        # Persist the figure to disk.
        os.makedirs('analysis_charts', exist_ok=True)
        chart_path = f'analysis_charts/comprehensive_analysis_episode_{episode}.png'
        plt.savefig(chart_path, dpi=300, bbox_inches='tight')
        print(f"📊 综合分析图表已保存: {chart_path}")

        # Close instead of show so training is never blocked by a GUI window.
        plt.close()

    def load_checkpoint(self, model_path, epsilon_strategy='reset'):
        """Restore agent weights from a checkpoint and record resume metadata.

        Any failure (missing file, incompatible weights) is logged and then
        re-raised to the caller.
        """
        try:
            self.epsilon_strategy = epsilon_strategy
            # Snapshot the currently-configured epsilon schedule; the resume
            # logic uses it alongside the checkpoint's original schedule.
            self.new_epsilon_params = {
                key: getattr(self.config, key)
                for key in ('epsilon_start', 'epsilon_end', 'epsilon_decay')
            }

            print(f"📂 加载模型: {model_path}")
            if not os.path.exists(model_path):
                raise FileNotFoundError(f"模型文件不存在: {model_path}")
            self.agent.load(model_path)
            print("✅ 模型加载成功")

            # Flag that subsequent training is a resumed session.
            self.resume_from_checkpoint = True

        except Exception as err:
            print(f"❌ 加载检查点失败: {err}")
            raise
    def train(self, num_episodes=None, resume_model_path=None, epsilon_strategy='reset'):
        """Run the smart training loop, with checkpoint resume and threat-detection stats.

        Args:
            num_episodes: Optional override for ``config.num_episodes``; ``None``
                keeps the configured value.
            resume_model_path: Optional checkpoint path; when given, the model and
                training state are restored via ``load_checkpoint`` before training.
            epsilon_strategy: Epsilon-schedule strategy, forwarded to both
                ``load_checkpoint`` and ``compute_epsilon_with_strategy``.

        Returns:
            The per-episode reward history (``self.logger.episode_rewards``).
        """
        if num_episodes is not None:
            self.config.num_episodes = num_episodes

        # Resume from a checkpoint when a model path is supplied.
        if resume_model_path:
            self.load_checkpoint(resume_model_path, epsilon_strategy=epsilon_strategy)

        # Print the configuration summary for fresh (non-resumed) runs only.
        if not self.resume_from_checkpoint:
            print(self.config.get_training_summary())

        # Work out how many episodes are actually left to train.
        remaining_episodes = self.config.num_episodes - self.start_episode
        if remaining_episodes <= 0:
            print(f"⚠️ 模型已训练完成 ({self.start_episode}/{self.config.num_episodes} 回合)")
            return self.logger.episode_rewards

        print(f"🔥 开始智能训练 (棋盘大小: {self.config.board_size}x{self.config.board_size})")
        print(f"🎁 胜率奖励: {'启用' if self.config.enable_win_rate_rewards else '禁用'}")
        print(f"🧠 智能惩罚: {'启用' if self.config.enable_batch_penalty else '禁用'}")
        # NOTE(review): both branches below print "enabled" (detailed vs. simplified
        # logging); confirm the else-branch text is intentional and not a typo for "禁用".
        print(f"🛡️ 威胁检测: {'启用' if self.config.threat_detection_config['enable_detailed_logging'] else '启用(简化日志)'}")
        if self.is_resumed_training:
            print(f"🔄 断点续训: 已完成{self.resumed_batch_count}批次，惩罚系统立即生效")

        # Main training loop; tqdm resumes its progress display from start_episode.
        for episode in tqdm(range(self.start_episode, self.config.num_episodes),
                            desc="智能训练五子棋AI",
                            initial=self.start_episode,
                            total=self.config.num_episodes):

            # Adaptive epsilon adjustment every 100 episodes, after a 100-episode warmup.
            if self.config.adaptive_epsilon and episode > 100 and episode % 100 == 0:
                self._update_adaptive_epsilon(episode)

            # Reset the environment; the wrapper takes precedence when present.
            if self.env_wrapper:
                state = self.env_wrapper.reset()
            else:
                state = self.env.reset()

            episode_reward = 0
            episode_length = 0
            done = False
            self.episode_invalid_moves = 0
            episode_win_rate_bonus = 0
            episode_batch_penalty = 0

            # Buffer this game's transitions; pushed to the replay buffer afterwards.
            episode_experiences = []

            # Per-episode exploration rate from the selected epsilon strategy.
            epsilon = self.compute_epsilon_with_strategy(episode, self.epsilon_strategy)

            # Single-game loop.
            while not done:
                # Action selection: noisy networks replace epsilon-greedy masking.
                if self.config.use_noisy_networks:
                    action = self.select_action_noisy(state)
                else:
                    action = self.select_action_with_masking(state, epsilon)

                # Step the environment.
                if self.env_wrapper:
                    next_state, reward, done, info = self.env_wrapper.step(action)
                else:
                    next_state, reward, done, info = self.env.step(action)

                # New: record threat-detection info reported by the environment.
                if 'threats_detected' in info and info['threats_detected']:
                    self._record_threat_defense_episode(episode, info)
                    # Tally blocked threats by severity level.
                    for direction, threat_level in info['threats_detected'].items():
                        if threat_level == 'CRITICAL':
                            self.threat_defense_stats['critical_threats_blocked'] += 1
                        elif threat_level == 'HIGH':
                            self.threat_defense_stats['high_threats_blocked'] += 1
                        elif threat_level == 'MEDIUM':
                            self.threat_defense_stats['medium_threats_blocked'] += 1
                        elif threat_level == 'LOW':
                            self.threat_defense_stats['low_threats_blocked'] += 1

                    self.threat_defense_stats['total_threats_blocked'] += len(info['threats_detected'])

                    if info.get('multiple_threats', 0) > 1:
                        self.threat_defense_stats['multiple_threat_defenses'] += 1

                    if info.get('defense_reward', 0) > 0:
                        self.threat_defense_stats['total_defense_bonus_awarded'] += info['defense_reward']

                # Record terminal-step bonuses and penalties.
                if done:
                    if 'win_rate_bonus' in info:
                        episode_win_rate_bonus = info['win_rate_bonus']
                        self.total_win_rate_bonus += episode_win_rate_bonus

                    if 'batch_penalty' in info:
                        episode_batch_penalty = info['batch_penalty']
                        self.total_batch_penalties += episode_batch_penalty

                    # New: capture the final step's threat info.
                    # NOTE(review): episode_threat_info is assigned here but never
                    # read later in this method — dead store; confirm whether it
                    # was meant to be logged or stored on self.
                    if 'threats_detected' in info:
                        episode_threat_info = {
                            'threats_detected': info['threats_detected'],
                            'defense_reward': info.get('defense_reward', 0),
                            'multiple_threats': info.get('multiple_threats', 0)
                        }

                # Stash the transition for post-game insertion into the buffer.
                episode_experiences.append((state, action, reward, next_state, done, info))

                # Count invalid moves flagged by the environment.
                if 'invalid_move' in info and info['invalid_move']:
                    self.episode_invalid_moves += 1

                state = next_state
                episode_reward += reward
                episode_length += 1

            # Post-game processing. Winner comes from the raw env, or from the last
            # step's info dict when the wrapper is in use.
            final_winner = self.env.winner if not self.env_wrapper else info.get('winner', 0)
            ai_player = 1

            # Tally the game outcome (the AI plays as player 1; -1 is the opponent).
            if final_winner == ai_player:
                self.total_wins += 1
            elif final_winner == -ai_player:
                self.total_losses += 1
            else:
                self.total_draws += 1

            # Flush this episode's transitions into the replay buffer.
            for s, a, r, ns, is_done, info_dict in episode_experiences:
                self.replay_buffer.push(s, a, r, ns, is_done)

            # Record per-episode statistics.
            self.invalid_moves_per_episode.append(self.episode_invalid_moves)
            if episode_win_rate_bonus > 0:
                self.win_rate_bonus_history.append((episode, episode_win_rate_bonus))
            if episode_batch_penalty > 0:
                self.batch_penalty_history.append((episode, episode_batch_penalty))

            # Monitor penalty-system performance every 1000 episodes.
            if episode % 1000 == 0:
                self._monitor_penalty_system_performance()

            # Train the network once the buffer holds enough samples.
            if len(self.replay_buffer) >= self.config.min_buffer_size:
                if self.config.prioritized_replay:
                    # Linearly anneal the importance-sampling exponent beta from
                    # per_beta_start to per_beta_end over the whole run.
                    beta = self.config.per_beta_start + \
                           (self.config.per_beta_end - self.config.per_beta_start) * \
                           (episode / self.config.num_episodes)
                    batch = self.replay_buffer.sample(self.config.batch_size, beta)
                    # Assumes the PER sample tuple carries indices at [5] and
                    # importance weights at [6] — TODO confirm against the
                    # PrioritizedReplayBuffer.sample implementation.
                    loss, q_value, td_errors = self.agent.update(batch, batch[6])
                    indices = batch[5]
                    # Epsilon offset keeps every priority strictly positive.
                    priorities = td_errors.squeeze() + 1e-6
                    self.replay_buffer.update_priorities(indices, priorities)
                else:
                    batch = self.replay_buffer.sample(self.config.batch_size)
                    loss, q_value, _ = self.agent.update(batch)

                self.logger.log_loss(loss, q_value)
                self.agent.update_target_network()

            # Periodically print smart-training statistics.
            if self.config.monitor_training and episode % self.config.print_training_stats_freq == 0:
                self._print_training_stats(episode)

            # Periodically run the smart batch analysis (wrapper only).
            if episode > 0 and episode % (self.config.batch_size * self.config.print_batch_analysis_freq) == 0:
                if self.env_wrapper:
                    self.env_wrapper.print_batch_analysis()


            # Rolling statistics every 100 episodes.
            if episode > 0 and (episode + 1) % 100 == 0:
                episodes_in_batch = min(100, episode + 1)
                # Wins in this window = current total minus the total recorded at the
                # previous checkpoint (win_rate_history rows are (episode, total_wins, rate)).
                wins_in_batch = self.total_wins - (0 if len(self.win_rate_history) == 0 else self.win_rate_history[-1][1])
                win_rate_last_100 = wins_in_batch / episodes_in_batch

                self.win_rate_history.append((episode + 1, self.total_wins, win_rate_last_100))
                # NOTE(review): losses and draws are conflated as `100 - wins_in_batch`
                # here and the last field is a constant 0 — confirm the intended schema.
                self.episode_100_stats.append((episode + 1, wins_in_batch, 100 - wins_in_batch, 0))

                print(f"\n📊 Episode {episode + 1}: 最近{episodes_in_batch}局胜{wins_in_batch}局 "
                      f"(胜率: {win_rate_last_100:.2%}), "
                      f"总计: 胜{self.total_wins} 负{self.total_losses} 平{self.total_draws}, "
                      f"Epsilon: {epsilon:.3f}")

                # New: show a threat-detection summary.
                if self.threat_defense_stats['total_threats_blocked'] > 0:
                    critical_rate = self.threat_defense_stats['critical_threats_blocked'] / self.threat_defense_stats['total_threats_blocked']
                    print(f"   🛡️ 威胁防守: 总计{self.threat_defense_stats['total_threats_blocked']}次, "
                          f"关键威胁{self.threat_defense_stats['critical_threats_blocked']}次({critical_rate:.1%})")

                # Status of the win-rate bonus / batch penalty systems.
                if self.env_wrapper:
                    self.env_wrapper.print_win_rate_status()

            self.logger.log_episode(episode, episode_reward, episode_length, epsilon)

            # Periodically save analysis charts.
            if self.enable_advanced_analysis and (episode + 1) % self.analysis_save_interval == 0:
                self.plot_comprehensive_analysis(episode+1)

            # Periodically save the model checkpoint (and the analysis charts).
            if (episode + 1) % self.save_interval == 0:
                os.makedirs('checkpoints', exist_ok=True)
                self.agent.save(f'checkpoints/gomoku_dqn_episode_{episode + 1}.pth')
                self.plot_comprehensive_analysis(episode+1)
                print(f"🧠 智能模型已保存: episode {episode + 1}")

        self.env.close()
        self._print_final_stats()

        print("🧠 智能五子棋AI训练完成！")
        return self.logger.episode_rewards