import matplotlib.pyplot as plt
import numpy as np
from collections import deque

# Configure a CJK-capable sans-serif font stack (primarily targets macOS;
# 'SimHei' covers Windows/Linux fallbacks).
plt.rcParams['font.sans-serif'] = ['PingFang SC', 'Arial Unicode MS', 'SimHei']
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

class GomokuEnvWrapper:
    """Gomoku (five-in-a-row) environment wrapper.

    Adds reward shaping on top of a raw Gomoku env: threat-detection
    defence bonuses plus a batch-level win-rate penalty system with an
    adaptive penalty coefficient.  The wrapped ``env`` must expose
    ``reset()`` and ``step(action) -> (state, reward, done, info)``.
    """
    def __init__(self, env, reward_config=None):
        # Wrapped environment and the raw reward configuration dict.
        self.env = env
        self.reward_config = reward_config or {}

        # Baseline reward-shaping values (with defaults).
        self.win_reward = self.reward_config.get('win_reward', 20.0)
        self.lose_penalty = self.reward_config.get('lose_penalty', -10.0)
        self.invalid_penalty = self.reward_config.get('invalid_penalty', -20.0)
        self.intermediate_scale = self.reward_config.get('intermediate_scale', 0.1)

        # Threat-detection reward configuration.
        self.threat_detection_config = self.reward_config.get('threat_detection', {
            'critical_defense_bonus': 2.0,    # multiplier for blocking an immediate win
            'high_defense_bonus': 1.5,        # multiplier for blocking an open four
            'medium_defense_bonus': 1.2,      # multiplier for blocking an open three
            'multiple_threat_bonus': 0.8,     # extra bonus for defending several threats at once
            'enable_detailed_logging': False   # verbose threat-detection logging
        })

        # Aggregate counters.
        self.move_count = 0
        self.game_count = 0
        self.win_count = 0

        # Threat-defence statistics, updated by _calculate_threat_detection_bonus.
        self.threat_defense_stats = {
            'total_threats_blocked': 0,
            'critical_threats_blocked': 0,
            'high_threats_blocked': 0,
            'medium_threats_blocked': 0,
            'low_threats_blocked': 0,
            'multiple_threat_defenses': 0,
            'total_defense_bonus_awarded': 0
        }

        # Win-rate milestone reward system.
        self.recent_100_games = []  # sliding window of the last 100 results (True=win, False=loss)
        self.last_100_win_rate_milestone = 0  # highest 100-game milestone already paid out
        self.last_total_win_rate_milestone = 0  # highest overall milestone already paid out

        # Batch win-rate penalty system.
        self.batch_size = 100  # games per batch
        self.batch_win_rates = []  # per-batch win rates [batch1_rate, batch2_rate, ...]
        self.current_batch_wins = 0  # wins in the batch currently in progress
        self.current_batch_games = 0  # games played in the batch currently in progress
        self.total_penalties = 0  # accumulated penalty score
        self.penalty_history = []  # penalty log: [(batch_num, penalty, reason), ...]

        # Smart adaptation-period management.
        self.adaptation_period = 20  # adaptation period in batches (20 batches = 2000 games)
        self.is_resumed_training = False  # whether this run resumed from a checkpoint
        self.skip_adaptation_on_resume = True  # skip the adaptation period when resuming
        self.effective_adaptation_period = self.adaptation_period  # period actually enforced

        self.compare_interval = 2  # compare against the batch 2 positions back
        self.penalty_coefficient = 15  # penalty multiplier (roughly 10-20x the rate gap)
        self.max_penalty = 200  # penalty cap per occurrence
        self.min_penalty_threshold = 0.01  # smallest win-rate gap (1%) that triggers a penalty

        # Dynamic penalty-coefficient adjustment.
        self.penalty_effectiveness_tracker = deque(maxlen=10)  # recent effectiveness samples
        self.penalty_adjustment_history = []  # log of coefficient adjustments
        self.last_penalty_effectiveness_check = 0  # batch number at the last effectiveness check
        self.penalty_check_interval = 5  # check effectiveness every 5 batches

        # Effectiveness thresholds used when retuning the coefficient.
        self.low_effectiveness_threshold = 0.3  # below this: strengthen penalties
        self.high_effectiveness_threshold = 0.8  # above this: soften penalties
        self.coefficient_adjustment_factor = 1.2  # multiplicative adjustment step
        self.max_coefficient = 30  # coefficient upper bound
        self.min_coefficient = 5   # coefficient lower bound

        # Consecutive poor-performance tracking.
        self.consecutive_poor_batches = 0  # batches in a row with a win-rate drop
        self.poor_performance_threshold = 3  # drops in a row before penalties escalate

        # Milestone bonuses for the recent-100-games win rate.
        self.win_rate_rewards_100 = {
            0.50: 0, 0.60: 100, 0.70: 150, 0.80: 200,
            0.90: 250, 0.95: 300, 0.98: 400
        }

        # Milestone bonuses for the overall win rate.
        self.win_rate_rewards_total = {
            0.50: 0, 0.60: 300, 0.70: 400, 0.80: 500,
            0.90: 600, 0.95: 900, 0.98: 1200
        }

    def set_resume_training_mode(self, is_resumed=True, completed_batches=0):
        """Switch between fresh-training and resumed-training modes.

        When resuming from a checkpoint (and ``skip_adaptation_on_resume``
        is set), the adaptation period is zeroed so the penalty system is
        active immediately; otherwise the configured adaptation period is
        restored.
        """
        self.is_resumed_training = is_resumed

        skip_adaptation = is_resumed and self.skip_adaptation_on_resume
        if not skip_adaptation:
            # Fresh (or non-skipping) training keeps the full adaptation period.
            self.effective_adaptation_period = self.adaptation_period
            print(f"🆕 正常训练模式: 适应期 {self.adaptation_period} 批次")
            return

        # Resumed training: penalty system takes effect right away.
        self.effective_adaptation_period = 0
        print(f"🔄 断点续训模式: 跳过适应期，惩罚系统立即生效")
        print(f"📊 已完成批次: {completed_batches}")

    def _calculate_penalty_effectiveness(self):
        """Estimate how effective recent penalties were.

        Looks at the last three recorded penalties and counts how many
        were followed, within the next two completed batches, by a win-rate
        improvement of more than 2 percentage points.  Returns the fraction
        of penalties that led to an improvement, or ``None`` when there is
        not enough history (fewer than 3 penalties or 5 finished batches).
        """
        if len(self.penalty_history) < 3 or len(self.batch_win_rates) < 5:
            return None

        rates = self.batch_win_rates
        sampled = self.penalty_history[-3:]
        improved_count = 0

        for batch_num, _amount, _reason in sampled:
            idx = batch_num - 1  # batch numbers are 1-based
            if idx >= len(rates):
                continue
            baseline = rates[idx]
            # Window of up to two batches following the penalized one.
            window = rates[idx + 1:min(idx + 3, len(rates))]
            if any(r > baseline + 0.02 for r in window):
                improved_count += 1

        return improved_count / len(sampled) if sampled else 0

    def _adjust_penalty_coefficient_based_on_effectiveness(self):
        """Periodically retune the penalty coefficient from measured effect.

        Every ``penalty_check_interval`` batches this samples
        ``_calculate_penalty_effectiveness()``; a low score strengthens the
        coefficient and a high score softens it, both clamped to
        [``min_coefficient``, ``max_coefficient``].  Every adjustment is
        appended to ``penalty_adjustment_history``.
        """
        current_batch_num = len(self.batch_win_rates) + 1

        # Throttle: only re-check every `penalty_check_interval` batches.
        if current_batch_num - self.last_penalty_effectiveness_check < self.penalty_check_interval:
            return

        effectiveness = self._calculate_penalty_effectiveness()
        if effectiveness is None:
            return  # not enough penalty/batch history yet

        self.penalty_effectiveness_tracker.append(effectiveness)
        self.last_penalty_effectiveness_check = current_batch_num

        old_coefficient = self.penalty_coefficient
        adjustment_made = False

        if effectiveness < self.low_effectiveness_threshold:
            # Penalties are not changing behaviour -> strengthen them.
            new_coefficient = min(self.penalty_coefficient * self.coefficient_adjustment_factor, self.max_coefficient)
            if new_coefficient != self.penalty_coefficient:
                self.penalty_coefficient = new_coefficient
                adjustment_made = True
                adjustment_type = "增强"
                reason = f"惩罚效果低({effectiveness:.1%})"

        elif effectiveness > self.high_effectiveness_threshold:
            # Penalties appear stronger than needed -> soften them.
            new_coefficient = max(self.penalty_coefficient / self.coefficient_adjustment_factor, self.min_coefficient)
            if new_coefficient != self.penalty_coefficient:
                self.penalty_coefficient = new_coefficient
                adjustment_made = True
                adjustment_type = "减弱"
                reason = f"惩罚效果过强({effectiveness:.1%})"

        if adjustment_made:
            self.penalty_adjustment_history.append({
                'batch_num': current_batch_num,
                'old_coefficient': old_coefficient,
                'new_coefficient': self.penalty_coefficient,
                'effectiveness': effectiveness,
                'adjustment_type': adjustment_type,
                'reason': reason
            })

            print(f"🔧 惩罚系数{adjustment_type}: {old_coefficient:.1f} → {self.penalty_coefficient:.1f}")
            print(f"   📊 原因: {reason}")
    def _track_consecutive_poor_performance(self, current_batch_rate):
        """Track batches whose win rate dropped versus the previous batch.

        Called from ``_finalize_batch`` *after* ``current_batch_rate`` has
        already been appended to ``self.batch_win_rates``, so the previous
        batch's rate lives at index ``-2``.  (The original code compared
        against ``[-1]`` — i.e. the current batch itself — which made the
        drop check always false and the counter permanently zero.)

        A drop of more than 2 percentage points increments the
        consecutive-poor counter; once it reaches
        ``poor_performance_threshold`` the penalty coefficient is boosted
        (capped at ``max_coefficient``).  Any non-drop resets the counter.
        """
        if len(self.batch_win_rates) < 2:
            return  # need a completed previous batch to compare against

        # Bug fix: current rate is already appended, so the previous
        # batch's rate is at [-2], not [-1].
        previous_rate = self.batch_win_rates[-2]

        if current_batch_rate < previous_rate - 0.02:  # dropped by more than 2%
            self.consecutive_poor_batches += 1

            # Escalate the penalty coefficient after a streak of poor batches.
            if self.consecutive_poor_batches >= self.poor_performance_threshold:
                old_coefficient = self.penalty_coefficient
                boost_factor = 1 + (self.consecutive_poor_batches - self.poor_performance_threshold + 1) * 0.1
                new_coefficient = min(old_coefficient * boost_factor, self.max_coefficient)

                if new_coefficient != old_coefficient:
                    self.penalty_coefficient = new_coefficient
                    print(f"⚠️ 连续{self.consecutive_poor_batches}批次表现不佳，加重惩罚: {old_coefficient:.1f} → {new_coefficient:.1f}")
        else:
            # Performance recovered: reset the streak counter.
            if self.consecutive_poor_batches > 0:
                print(f"✅ 表现回升，重置连续不佳计数器 (之前:{self.consecutive_poor_batches})")
            self.consecutive_poor_batches = 0

    def reset(self):
        """Start a new game: clear per-game counters and reset the env."""
        self.move_count = 0
        self.game_count += 1
        return self.env.reset()

    def step(self, action):
        """Advance the env one move and apply Gomoku reward shaping.

        Returns ``(state, shaped_reward, done, info)``; ``info`` is
        augmented with the original reward, counters, win-rate figures,
        penalty-system state, and threat-defence statistics.
        """
        state, reward, done, info = self.env.step(action)
        self.move_count += 1

        # Gomoku-specific reward shaping + threat-detection bonus.
        shaped_reward = self._shape_gomoku_reward_with_threat_detection(reward, done, info)

        # End-of-game handling.
        if done and 'winner' in info:
            # Batch statistics only advance when a game actually finishes.
            self.current_batch_games += 1
            # Milestone win-rate bonus (non-zero only on AI wins).
            win_rate_bonus = self._calculate_win_rate_bonus(info['winner'])

            # Smart batch-regression penalty (evaluated before the win
            # counter is updated; the method compensates for that).
            batch_penalty = self._calculate_batch_penalty(info['winner'])

            # Fold bonus and penalty into the shaped reward.
            total_bonus = win_rate_bonus - batch_penalty
            shaped_reward += total_bonus

            info['win_rate_bonus'] = win_rate_bonus
            info['batch_penalty'] = batch_penalty
            info['total_bonus'] = total_bonus

            # Record the result (updates windows, may finalize a batch).
            self._record_game_result(info['winner'])

        # Aggregate win counter (winner == 1 means the AI won).
        if done and 'winner' in info and info['winner'] == 1:
            self.win_count += 1

        info['original_reward'] = reward
        info['move_count'] = self.move_count
        info['game_count'] = self.game_count
        info['win_rate'] = self.win_count / max(self.game_count, 1)
        info['recent_100_win_rate'] = self._get_recent_100_win_rate()
        info['current_batch'] = len(self.batch_win_rates) + 1
        info['current_batch_progress'] = self.current_batch_games
        info['total_penalties'] = self.total_penalties
        info['penalty_coefficient'] = self.penalty_coefficient  # current (possibly adapted) coefficient
        info['consecutive_poor_batches'] = self.consecutive_poor_batches

        # Snapshot of the threat-defence counters.
        info['threat_defense_stats'] = self.threat_defense_stats.copy()

        return state, shaped_reward, done, info

    def _shape_gomoku_reward_with_threat_detection(self, reward, done, info):
        """Reshape the raw env reward for Gomoku training.

        Terminal states map to fixed outcome rewards (invalid move, win,
        loss, draw); non-terminal rewards are scaled, augmented with the
        threat-detection bonus, and slightly penalised in very long games.
        """
        if not done:
            # Ongoing game: scaled base reward plus defensive bonus.
            shaped = reward * self.intermediate_scale
            shaped += self._calculate_threat_detection_bonus(info)
            if self.move_count > 50:
                shaped -= 0.01  # discourage dragging the game out
            return shaped

        # Terminal state: fixed outcome rewards.
        if info.get('invalid_move'):
            return self.invalid_penalty
        if 'winner' in info:
            winner = info['winner']
            if winner == 1:
                return self.win_reward
            if winner == -1:
                return self.lose_penalty
            return 0  # draw
        return reward

    def _calculate_threat_detection_bonus(self, info):
        """Compute the extra reward for blocking detected threats.

        ``info['threats_detected']`` maps directions to threat levels
        ('CRITICAL'/'HIGH'/'MEDIUM'/'LOW').  Blocking critical/high/medium
        threats scales ``info['defense_reward']`` by the configured bonus
        multiplier; low threats are counted but earn no bonus.  Defending
        several simultaneous threats earns an additional bonus.  Updates
        ``self.threat_defense_stats`` as a side effect and returns the
        total bonus (0.0 when no threat info is present).
        """
        if 'threats_detected' not in info:
            return 0.0

        threats_detected = info.get('threats_detected', {})
        defense_reward = info.get('defense_reward', 0)
        multiple_threats = info.get('multiple_threats', 0)

        # (bonus-config key, stats key) per threat level; LOW pays nothing.
        level_map = {
            'CRITICAL': ('critical_defense_bonus', 'critical_threats_blocked'),
            'HIGH': ('high_defense_bonus', 'high_threats_blocked'),
            'MEDIUM': ('medium_defense_bonus', 'medium_threats_blocked'),
            'LOW': (None, 'low_threats_blocked'),
        }

        bonus = 0.0
        blocked = 0
        for level in threats_detected.values():
            entry = level_map.get(level)
            if entry is None:
                continue  # unknown level strings are ignored
            config_key, stats_key = entry
            self.threat_defense_stats[stats_key] += 1
            blocked += 1
            if config_key is not None:
                bonus += defense_reward * self.threat_detection_config[config_key]

        # Bonus for defending against multiple simultaneous threats.
        if multiple_threats > 1:
            bonus += multiple_threats * self.threat_detection_config['multiple_threat_bonus']
            self.threat_defense_stats['multiple_threat_defenses'] += 1

        # Aggregate counters only when at least one known threat was seen.
        if blocked > 0:
            self.threat_defense_stats['total_threats_blocked'] += blocked
            self.threat_defense_stats['total_defense_bonus_awarded'] += bonus

        # Optional verbose logging.
        if self.threat_detection_config['enable_detailed_logging'] and bonus > 0:
            print(f"🛡️ 威胁检测奖励: +{bonus:.3f}")
            print(f"   检测到威胁: {threats_detected}")
            if multiple_threats > 1:
                print(f"   多重威胁防守: {multiple_threats}个威胁")

        return bonus

    def _record_game_result(self, winner):
        """Record a finished game for win-rate tracking (AI is player 1)."""
        won = winner == 1

        # Maintain the sliding window of the most recent 100 outcomes.
        self.recent_100_games.append(won)
        while len(self.recent_100_games) > 100:
            self.recent_100_games.pop(0)

        # Per-batch win counter.
        if won:
            self.current_batch_wins += 1

        # Close out the batch once enough games have been played.
        if self.current_batch_games >= self.batch_size:
            self._finalize_batch()

    def _finalize_batch(self):
        """完成当前批次，记录胜率并重置计数器"""
        current_batch_rate = self.current_batch_wins / self.current_batch_games
        self.batch_win_rates.append(current_batch_rate)

        batch_num = len(self.batch_win_rates)
        print(f"📊 批次 {batch_num} 完成: {self.current_batch_wins}/{self.current_batch_games} 胜率: {current_batch_rate:.2%}")

        # 🔥 新增：追踪连续表现不佳
        self._track_consecutive_poor_performance(current_batch_rate)

        # 🔥 新增：动态调整惩罚系数
        self._adjust_penalty_coefficient_based_on_effectiveness()

        # 重置当前批次计数器
        self.current_batch_wins = 0
        self.current_batch_games = 0

    def _calculate_batch_penalty(self, winner):
        """Penalty for a batch whose win rate regressed vs. an earlier one.

        Called from ``step`` on the game that completes a batch, *before*
        ``_record_game_result`` updates the win counter (hence the manual
        ``winner`` adjustment below).  Returns 0.0 unless the completing
        batch's win rate fell more than ``min_penalty_threshold`` below
        the batch ``compare_interval`` positions back.
        """
        # Only evaluated on the batch-completing game (win or lose).
        if self.current_batch_games < self.batch_size:
            return 0.0  # batch still in progress

        current_batch_num = len(self.batch_win_rates) + 1

        # Adaptation-period protection.
        if current_batch_num <= self.effective_adaptation_period:
            if self.is_resumed_training:
                # NOTE(review): resumed training only logs here and still
                # falls through to penalty calculation — presumably
                # intentional, but confirm for the resume-without-skip case.
                print(f"🔄 断点续训模式: 批次{current_batch_num}惩罚系统已生效")
            else:
                print(f"🛡️ 适应期保护: 批次{current_batch_num}暂不惩罚 (适应期:{self.effective_adaptation_period})")
                return 0.0

        # Pick the comparison batch; bail out if it does not exist yet.
        compare_batch_num = current_batch_num - self.compare_interval
        if compare_batch_num < 1 or compare_batch_num > len(self.batch_win_rates):
            return 0.0

        # Current batch win rate, counting the game that just finished
        # (current_batch_wins has not been updated for it yet).
        total_current_wins = self.current_batch_wins + (1 if winner == 1 else 0)
        current_batch_rate = total_current_wins / self.batch_size

        # Comparison batch's recorded win rate (batch numbers are 1-based).
        compare_batch_rate = self.batch_win_rates[compare_batch_num - 1]

        # Win-rate regression (positive means we got worse).
        rate_diff = compare_batch_rate - current_batch_rate

        # Penalize only meaningful regressions.
        if rate_diff <= self.min_penalty_threshold:
            return 0.0

        # Base penalty scales with the size of the regression.
        base_penalty = rate_diff * self.penalty_coefficient

        # Escalate while performance keeps dropping batch after batch.
        if self.consecutive_poor_batches > 0:
            consecutive_multiplier = 1 + (self.consecutive_poor_batches * 0.2)
            base_penalty *= consecutive_multiplier
            print(f"⚠️ 连续{self.consecutive_poor_batches}批次表现不佳，惩罚加重{consecutive_multiplier:.1f}倍")

        # Clamp to the configured cap.
        penalty = min(base_penalty, self.max_penalty)

        # Record the penalty for reporting and effectiveness tracking.
        self.total_penalties += penalty
        reason = f"批次{current_batch_num}胜率({current_batch_rate:.2%}) < 批次{compare_batch_num}胜率({compare_batch_rate:.2%}), 差距{rate_diff:.2%}"
        if self.consecutive_poor_batches > 0:
            reason += f" [连续第{self.consecutive_poor_batches + 1}次下滑]"

        self.penalty_history.append((current_batch_num, penalty, reason))

        print(f"🚨 批次胜率惩罚: {penalty:.1f}分 - {reason}")
        print(f"   💰 当前惩罚系数: {self.penalty_coefficient:.1f}")

        return penalty

    def _get_recent_100_win_rate(self):
        """Return the win fraction over the sliding recent-games window
        (0.0 when no games have been recorded yet)."""
        games = self.recent_100_games
        return sum(games) / len(games) if games else 0.0

    def _calculate_win_rate_bonus(self, winner):
        """Milestone-based win-rate bonus, awarded only on AI wins.

        Checks two tracks: the recent-100-games win rate (once at least
        20 games are recorded) and the overall win rate (once at least 50
        games are played).  Each track pays out when a new, higher
        milestone threshold is crossed, then advances that track's stored
        milestone so the same bonus is not paid twice.
        """
        if winner != 1:
            return 0.0

        def highest_new_milestone(rate, reward_table, last_milestone):
            # Highest threshold reached that exceeds the stored milestone.
            for threshold in sorted(reward_table, reverse=True):
                if rate >= threshold and threshold > last_milestone:
                    return threshold
            return None

        bonus_sum = 0.0

        # Track 1: rolling recent-100-games win rate.
        if len(self.recent_100_games) >= 20:
            recent_rate = self._get_recent_100_win_rate()
            bonus_sum += self._get_milestone_bonus(
                recent_rate,
                self.win_rate_rewards_100,
                self.last_100_win_rate_milestone,
                "100局"
            )
            reached = highest_new_milestone(
                recent_rate, self.win_rate_rewards_100, self.last_100_win_rate_milestone)
            if reached is not None:
                self.last_100_win_rate_milestone = reached

        # Track 2: overall win rate.
        if self.game_count >= 50:
            overall_rate = self.win_count / self.game_count
            bonus_sum += self._get_milestone_bonus(
                overall_rate,
                self.win_rate_rewards_total,
                self.last_total_win_rate_milestone,
                "总体"
            )
            reached = highest_new_milestone(
                overall_rate, self.win_rate_rewards_total, self.last_total_win_rate_milestone)
            if reached is not None:
                self.last_total_win_rate_milestone = reached

        return bonus_sum

    def _get_milestone_bonus(self, current_rate, reward_dict, last_milestone, rate_type):
        """Return the bonus for the highest newly-crossed milestone.

        Scans thresholds from highest to lowest and pays out the first one
        that ``current_rate`` has reached and that exceeds the previously
        recorded ``last_milestone``; returns 0.0 otherwise.  Does not
        mutate any state — the caller advances the milestone itself.
        """
        for threshold in sorted(reward_dict, reverse=True):
            crossed = current_rate >= threshold
            is_new = threshold > last_milestone
            if crossed and is_new:
                payout = reward_dict[threshold]
                print(f"🎉 {rate_type}胜率达到 {threshold*100:.1f}%! 获得奖励: {payout}")
                return payout
        return 0.0

    def get_stats(self):
        """Return a snapshot dict of all tracking statistics.

        Covers overall/recent win rates and milestones, current-batch
        progress, the smart penalty system's state, threat-defence
        counters, and a couple of debug fields.
        """
        recent_rate = self._get_recent_100_win_rate()
        overall_rate = self.win_count / max(self.game_count, 1)
        batch_num = len(self.batch_win_rates) + 1

        # Describe the batch in progress (or fall back to the last one).
        if self.current_batch_games > 0:
            batch_rate = self.current_batch_wins / self.current_batch_games
            batch_status = f"进行中 ({self.current_batch_wins}/{self.current_batch_games})"
        elif self.batch_win_rates:
            batch_rate = self.batch_win_rates[-1]
            batch_status = f"刚开始 (上批次: {batch_rate:.1%})"
        else:
            batch_rate = 0.0
            batch_status = "首批次开始"

        # Penalty-system lifecycle label.
        if batch_num <= self.effective_adaptation_period:
            system_status = "适应期"
        else:
            system_status = "已激活"
        if self.is_resumed_training and self.skip_adaptation_on_resume:
            system_status += " (续训跳过适应期)"

        # Win-rate and milestone section.
        stats = {
            'games_played': self.game_count,
            'total_wins': self.win_count,
            'win_rate': overall_rate,
            'recent_100_win_rate': recent_rate,
            'avg_moves_per_game': self.move_count / max(self.game_count, 1),
            'last_100_milestone': self.last_100_win_rate_milestone,
            'last_total_milestone': self.last_total_win_rate_milestone,
            'next_100_target': self._get_next_milestone(recent_rate, self.win_rate_rewards_100),
            'next_total_target': self._get_next_milestone(overall_rate, self.win_rate_rewards_total),
        }

        # Batch-penalty section.
        stats.update({
            'current_batch_num': batch_num,
            'current_batch_progress': f"{self.current_batch_games}/{self.batch_size}",
            'current_batch_rate': batch_rate,
            'batch_status': batch_status,
            'completed_batches': len(self.batch_win_rates),
            'batch_win_rates': self.batch_win_rates,
            'total_penalties': self.total_penalties,
            'penalty_count': len(self.penalty_history),
            'recent_penalties': self.penalty_history[-3:] if self.penalty_history else [],
        })

        # Adaptive penalty-system state.
        stats.update({
            'penalty_system_status': system_status,
            'effective_adaptation_period': self.effective_adaptation_period,
            'penalty_coefficient': self.penalty_coefficient,
            'consecutive_poor_batches': self.consecutive_poor_batches,
            'penalty_effectiveness': list(self.penalty_effectiveness_tracker),
            'coefficient_adjustments': len(self.penalty_adjustment_history),
            'is_resumed_training': self.is_resumed_training,
        })

        # Threat-defence counters (copied so callers cannot mutate ours).
        stats['threat_defense_stats'] = self.threat_defense_stats.copy()

        # Debug fields.
        stats['debug_current_batch_wins'] = self.current_batch_wins
        stats['debug_current_batch_games'] = self.current_batch_games

        return stats

    def _get_next_milestone(self, current_rate, reward_dict):
        """Return the lowest milestone threshold still above
        ``current_rate``, or ``None`` when every milestone is reached."""
        pending = [t for t in sorted(reward_dict) if current_rate < t]
        return pending[0] if pending else None

    def print_penalty_status(self):
        """Print the smart penalty system's current state to stdout."""
        stats = self.get_stats()
        print(f"\n🧠 智能惩罚系统状态:")
        print(f"   系统状态: {stats['penalty_system_status']}")
        print(f"   当前系数: {stats['penalty_coefficient']:.1f}")
        print(f"   系数调整次数: {stats['coefficient_adjustments']}")

        # Average of the recent effectiveness samples, if any were taken.
        if self.penalty_effectiveness_tracker:
            avg_effectiveness = np.mean(list(self.penalty_effectiveness_tracker))
            print(f"   惩罚效果: {avg_effectiveness:.1%}")

        if stats['consecutive_poor_batches'] > 0:
            print(f"   ⚠️ 连续表现不佳: {stats['consecutive_poor_batches']}批次")

        # Most recent coefficient adjustment, if one has happened.
        if self.penalty_adjustment_history:
            last_adjustment = self.penalty_adjustment_history[-1]
            print(f"   最近调整: 批次{last_adjustment['batch_num']} {last_adjustment['adjustment_type']} → {last_adjustment['new_coefficient']:.1f}")

    def print_threat_defense_status(self):
        """Print a summary of the threat-detection defence counters."""
        counters = self.threat_defense_stats
        blocked_total = counters['total_threats_blocked']

        print(f"\n🛡️ 威胁检测防守统计:")
        print(f"   总威胁阻止: {blocked_total}")
        if blocked_total == 0:
            return

        # Per-level breakdown with its share of all blocked threats.
        for label, key in (
            ("🚨 关键威胁阻止", 'critical_threats_blocked'),
            ("⚠️ 高威胁阻止", 'high_threats_blocked'),
            ("📊 中威胁阻止", 'medium_threats_blocked'),
            ("💡 低威胁阻止", 'low_threats_blocked'),
        ):
            count = counters[key]
            print(f"   {label}: {count} ({count/blocked_total*100:.1f}%)")
        print(f"   🎯 多重威胁防守: {counters['multiple_threat_defenses']}")
        print(f"   💰 防守奖励总计: {counters['total_defense_bonus_awarded']:.2f}")

    def print_win_rate_status(self):
        """Print a full win-rate report: milestones, batch analysis, and
        the smart penalty system's status."""
        stats = self.get_stats()
        print(f"\n📊 胜率状态报告:")
        print(f"   总局数: {stats['games_played']}")
        print(f"   总胜率: {stats['win_rate']*100:.1f}%")
        print(f"   最近100局胜率: {stats['recent_100_win_rate']*100:.1f}%")
        print(f"   100局里程碑: {stats['last_100_milestone']*100:.1f}%")
        print(f"   总体里程碑: {stats['last_total_milestone']*100:.1f}%")

        if stats['next_100_target']:
            print(f"   下个100局目标: {stats['next_100_target']*100:.1f}%")
        if stats['next_total_target']:
            print(f"   下个总体目标: {stats['next_total_target']*100:.1f}%")

        # Smart batch-analysis section.
        print(f"\n🚨 智能批次分析状态:")
        print(f"   当前批次: {stats['current_batch_num']}")
        print(f"   系统状态: {stats['penalty_system_status']}")

        # Batch rate comes either from the live batch or the last finished one.
        if self.current_batch_games > 0:
            print(f"   当前批次胜率: {stats['current_batch_rate']*100:.1f}% (实时)")
        elif len(self.batch_win_rates) > 0:
            print(f"   当前批次胜率: {stats['current_batch_rate']*100:.1f}% (参考上批次)")
        else:
            print(f"   当前批次胜率: 等待中 (首批次)")

        print(f"   已完成批次: {stats['completed_batches']}")
        print(f"   累计惩罚: {stats['total_penalties']:.1f}分")
        print(f"   惩罚次数: {stats['penalty_count']}")

        # Trend over the last three completed batches.
        if len(self.batch_win_rates) >= 3:
            recent_batches = self.batch_win_rates[-3:]
            print(f"   最近3批次胜率: {[f'{rate:.1%}' for rate in recent_batches]}")

            if len(recent_batches) >= 2:
                trend = recent_batches[-1] - recent_batches[-2]
                trend_icon = "📈" if trend > 0.02 else "📉" if trend < -0.02 else "📊"
                print(f"   胜率趋势: {trend_icon} {trend:+.1%}")

        # Smart penalty-system status.
        self.print_penalty_status()

        # Threat-defence status (currently disabled).
        # self.print_threat_defense_status()

    def print_batch_analysis(self):
        """Print a per-batch win-rate table with trends and penalties.

        Requires at least 3 completed batches; also shows the in-progress
        batch and the most recent penalty-coefficient adjustments.
        """
        if len(self.batch_win_rates) < 3:
            print("📋 批次分析: 数据不足，需要至少3个完成的批次")
            return

        print(f"\n📋 智能批次胜率分析报告:")
        print(f"{'批次':<8} {'胜率':<8} {'趋势':<8} {'vs前2批次':<12} {'惩罚':<10}")
        print("-" * 55)

        for i, rate in enumerate(self.batch_win_rates):
            batch_num = i + 1
            trend = ""
            vs_prev = ""
            penalty_info = ""

            # Trend vs. the immediately preceding batch.
            if i > 0:
                diff = rate - self.batch_win_rates[i-1]
                if diff > 0.02:
                    trend = "📈↗️"
                elif diff < -0.02:
                    trend = "📉↘️"
                else:
                    trend = "📊→"

            # Comparison against the batch two positions back (the same
            # offset the penalty system uses).
            if i >= 2:
                compare_idx = i - 2
                compare_rate = self.batch_win_rates[compare_idx]
                diff = rate - compare_rate
                if diff > 0:
                    vs_prev = f"✅+{diff:.1%}"
                elif diff < -0.01:
                    vs_prev = f"⚠️{diff:.1%}"
                else:
                    vs_prev = "🔄≈"

            # Penalty recorded for this batch, if any.
            for penalty_batch, penalty_amount, _ in self.penalty_history:
                if penalty_batch == batch_num:
                    penalty_info = f"🚨{penalty_amount:.0f}"
                    break
            if not penalty_info:
                penalty_info = "✅无"

            print(f"{batch_num:<8} {rate:<8.1%} {trend:<8} {vs_prev:<12} {penalty_info:<10}")

        # Row for the batch still in progress.
        if self.current_batch_games > 0:
            current_rate = self.current_batch_wins / self.current_batch_games
            current_batch_num = len(self.batch_win_rates) + 1
            progress_info = f"进度{self.current_batch_games}/100"
            print(f"{current_batch_num:<8} {current_rate:<8.1%} {'🔄进行中':<8} {progress_info:<12} {'预测中':<10}")

        # Recent penalty-coefficient adjustments.
        if self.penalty_adjustment_history:
            print(f"\n🔧 惩罚系数调整历史:")
            for adj in self.penalty_adjustment_history[-3:]:  # last 3 adjustments
                print(f"   批次{adj['batch_num']}: {adj['old_coefficient']:.1f} → {adj['new_coefficient']:.1f} ({adj['adjustment_type']}) - {adj['reason']}")

    def set_penalty_config(self, coefficient=None, max_penalty=None, min_threshold=None, adaptation_period=None):
        """Override penalty-system settings at runtime.

        Only non-``None`` arguments are applied.  Changing
        ``adaptation_period`` also refreshes the effective adaptation
        period unless the wrapper is in resumed-training mode (which may
        have deliberately zeroed it).
        """
        simple_updates = {
            'penalty_coefficient': coefficient,
            'max_penalty': max_penalty,
            'min_penalty_threshold': min_threshold,
        }
        for attr, value in simple_updates.items():
            if value is not None:
                setattr(self, attr, value)

        if adaptation_period is not None:
            self.adaptation_period = adaptation_period
            # Resumed training may have zeroed the effective period on purpose.
            if not self.is_resumed_training:
                self.effective_adaptation_period = adaptation_period

        print(f"🔧 惩罚配置已更新:")
        print(f"   系数={self.penalty_coefficient}, 上限={self.max_penalty}")
        print(f"   阈值={self.min_penalty_threshold:.1%}, 适应期={self.adaptation_period}批次")
        print(f"   有效适应期={self.effective_adaptation_period}批次")

    def set_threat_detection_config(self, **kwargs):
        """Update known threat-detection settings; warn on unknown keys."""
        config = self.threat_detection_config
        for key, value in kwargs.items():
            if key not in config:
                print(f"⚠️ 未知的威胁检测配置项: {key}")
                continue
            config[key] = value
            print(f"🛡️ 威胁检测配置更新: {key} = {value}")

    def get_penalty_recommendations(self):
        """Generate human-readable tuning advice for the penalty system.

        Returns a list of suggestion strings, or a single "not enough
        data" string when fewer than 3 penalties have been recorded.

        Note: the original computed an unused ``avg_penalty`` over the
        last five penalties; that dead code has been removed.
        """
        if len(self.penalty_history) < 3:
            return "数据不足，无法提供建议"

        recommendations = []

        # Advice based on measured penalty effectiveness.
        if self.penalty_effectiveness_tracker:
            avg_effectiveness = np.mean(list(self.penalty_effectiveness_tracker))
            if avg_effectiveness < 0.3:
                recommendations.append(f"惩罚效果低({avg_effectiveness:.1%})，建议增加惩罚系数")
            elif avg_effectiveness > 0.8:
                recommendations.append(f"惩罚效果过强({avg_effectiveness:.1%})，可适当降低系数")

        # Advice based on consecutive poor batches.
        if self.consecutive_poor_batches >= 3:
            recommendations.append(f"连续{self.consecutive_poor_batches}批次表现不佳，建议检查训练策略")

        # Advice based on penalty frequency relative to completed batches.
        if len(self.penalty_history) > len(self.batch_win_rates) * 0.6:
            recommendations.append("惩罚频率过高，建议降低惩罚阈值或检查训练稳定性")

        # Advice based on coefficient churn.
        if len(self.penalty_adjustment_history) > 10:
            recommendations.append("惩罚系数频繁调整，建议稳定训练环境")

        # Advice based on threat-defence quality.
        threat_stats = self.threat_defense_stats
        if threat_stats['total_threats_blocked'] > 0:
            critical_ratio = threat_stats['critical_threats_blocked'] / threat_stats['total_threats_blocked']
            if critical_ratio > 0.3:
                recommendations.append(f"关键威胁阻止率高({critical_ratio:.1%})，防守能力良好")
            elif critical_ratio < 0.1:
                recommendations.append(f"关键威胁阻止率低({critical_ratio:.1%})，建议加强防守训练")

        return recommendations if recommendations else ["智能惩罚系统运行良好"]

    def get_penalty_summary(self):
        """Return a dict summarizing the smart penalty system.

        Combines ``get_stats()`` fields with the effectiveness history,
        recent coefficient adjustments, tuning recommendations, and a
        threat-defence sub-summary.
        """
        stats = self.get_stats()

        summary = {
            'system_status': stats['penalty_system_status'],
            'current_coefficient': stats['penalty_coefficient'],
            'effective_adaptation_period': stats['effective_adaptation_period'],
            'consecutive_poor_batches': stats['consecutive_poor_batches'],
            'penalty_effectiveness': list(self.penalty_effectiveness_tracker),
            'avg_effectiveness': np.mean(list(self.penalty_effectiveness_tracker)) if self.penalty_effectiveness_tracker else 0,
            'coefficient_adjustments': stats['coefficient_adjustments'],
            'recent_adjustments': self.penalty_adjustment_history[-3:] if self.penalty_adjustment_history else [],
            'recommendations': self.get_penalty_recommendations(),
            'is_resumed_training': self.is_resumed_training,
            # Threat-defence sub-summary (rate guarded against div-by-zero).
            'threat_defense_summary': {
                'total_threats_blocked': self.threat_defense_stats['total_threats_blocked'],
                'critical_defense_rate': self.threat_defense_stats['critical_threats_blocked'] / max(self.threat_defense_stats['total_threats_blocked'], 1),
                'total_defense_bonus': self.threat_defense_stats['total_defense_bonus_awarded'],
                'multiple_threat_defenses': self.threat_defense_stats['multiple_threat_defenses']
            }
        }

        return summary