from dataclasses import dataclass
from typing import Dict, Any, Optional


@dataclass
class GomokuDQNConfig:
    """Unified configuration for Gomoku (five-in-a-row) DQN training.

    Every hyper-parameter is a dataclass field, so each one appears in
    ``__init__``/``repr``/``asdict`` and can be overridden per instance.
    Dict-valued fields default to ``None`` (mutable defaults are not
    allowed directly on dataclass fields) and are populated with their
    standard values in ``__post_init__``.
    """

    # ==================== Basic game configuration ====================
    board_size: int = 15
    max_game_length: int = 450  # hard cap on plies per game; prevents infinite loops

    # ==================== Network architecture ====================
    network_config: str = 'large'  # 'small', 'medium', 'large', 'xlarge'
    task_type: str = 'classification'  # 'classification', 'regression', 'multi_task'
    num_classes: int = 225  # 15*15 = 225 possible move positions
    use_noisy: bool = True  # whether to use NoisyNet layers
    dropout_rate: float = 0.1
    history_steps: int = 4  # number of past board states fed to the network
    attention_type: str = 'cbam'  # 'cbam', 'self_attention', None
    attention_freq: int = 4  # insert an attention block every N layers

    # ==================== Core DQN parameters ====================
    learning_rate: float = 1e-4
    gamma: float = 0.99  # discount factor
    epsilon_start: float = 1.0
    epsilon_end: float = 0.05
    epsilon_decay: float = 1000.0  # was the int literal 1000; normalized to match the float annotation
    epsilon_group_steps: int = 64  # step-grouping base controlling how often epsilon is updated
    dqn_batch_size: int = 64
    replay_buffer_size: int = 100000  # max stored transitions; oldest entries evicted (FIFO) when full
    target_update_freq: int = 1000

    # ==================== Advanced DQN features ====================
    # Multi-step DQN
    n_step: int = 3
    multi_step_gamma: float = 0.99

    # Double DQN
    double_dqn: bool = True

    # Target-network update
    soft_target_update: bool = True
    target_update_tau: float = 0.005

    # ==================== Training stability ====================
    grad_clip_norm: float = 1.0  # gradient-clipping norm
    weight_decay: float = 1e-5
    use_huber_loss: bool = True
    max_q_value: float = 100.0

    # Positional-encoding configuration
    use_positional_encoding: bool = True
    learnable_pos_encoding: bool = True  # True = learnable, False = fixed sin/cos encoding
    pos_encoding_temperature: float = 10000.0  # only used for sin/cos encoding

    # ==================== Learning-rate schedule ====================
    # NOTE: annotated so it is a real dataclass field; the original
    # unannotated assignment made it a shared class attribute invisible
    # to __init__/asdict/replace.
    lr_schedule: str = 'warmup_cosine'  # 'step', 'exponential', 'cosine', 'warmup_cosine'
    lr_step_size: int = 5000
    lr_gamma: float = 0.9
    lr_cosine_steps: int = 50000
    lr_update_freq: int = 100
    warmup_steps: int = 1000  # warm-up steps

    # ==================== Prioritized experience replay ====================
    use_prioritized_replay: bool = True
    priority_alpha: float = 0.6
    priority_beta_start: float = 0.4
    priority_beta_end: float = 1.0
    priority_epsilon: float = 1e-6

    # ==================== Self-play ====================
    self_play_episodes: int = 1000
    evaluation_freq: int = 100  # evaluate every N episodes
    evaluation_games: int = 50  # games played per evaluation

    # ==================== MCTS mixed sampling ====================
    use_mcts_sampling: bool = True
    mcts_simulations: int = 50
    mcts_c_puct: float = 1.0
    mcts_sampling_ratio: float = 0.2  # fraction of samples drawn via MCTS
    mcts_update_freq: int = 500  # run MCTS every N episodes

    # ==================== Parallel training ====================
    num_actors: int = 4
    actor_update_freq: int = 100

    # ==================== Checkpointing / resume ====================
    checkpoint_dir: str = 'checkpoints'
    checkpoint_freq: int = 5000  # save a checkpoint every N episodes
    auto_save: bool = True

    # ==================== Training monitoring / printing ====================
    monitor_training: bool = True  # enable training monitoring
    training_stats_batch_freq: int = 10  # aggregate statistics every N episodes
    print_batch_analysis_freq: int = 5  # print batch analysis every N batches

    # ==================== Advanced analysis and logging ====================
    enable_advanced_analysis: bool = True  # enable advanced analysis charts
    analysis_save_interval: int = 2000  # save analysis charts every N episodes
    log_save_freq: int = 1000  # save training logs every N episodes

    # ==================== Threat-detection reward configuration ====================
    # Optional because the real default dict is built in __post_init__.
    threat_detection_config: Optional[Dict[str, Any]] = None

    THREAT_LEVELS: Optional[Dict[str, Any]] = None
    # ==================== Win-rate reward system ====================
    enable_win_rate_rewards: bool = True
    win_rate_min_games_for_recent: int = 20
    win_rate_min_games_for_total: int = 50

    # Reward table for the recent-batch win rate (filled in __post_init__)
    win_rate_rewards_in_batch: Optional[Dict[float, float]] = None

    # ==================== Batch win-rate penalty system ====================
    enable_batch_penalty: bool = True  # master switch for the system
    batch_adaptation_period: int = 20
    batch_compare_interval: int = 2

    batch_penalty_coefficient: int = 10  # penalty coefficient
    # Annotated for the same reason as lr_schedule: without annotations
    # these were shared class attributes, not configurable fields.
    adjustment_rate: float = 0.15  # adjustment step size
    min_coefficient: float = 3.0  # lower bound for the coefficient
    max_coefficient: float = 20.0  # upper bound for the coefficient
    target_effectiveness: float = 0.6  # target effectiveness threshold

    batch_max_penalty: int = 500  # maximum penalty value
    batch_min_penalty_threshold: float = 0.01  # minimum trigger threshold (1%)

    # Advanced batch-penalty options
    batch_penalty_decay: bool = True
    batch_penalty_decay_factor: float = 1.2
    batch_reward_recovery: bool = True
    batch_recovery_reward_factor: float = 0.5

    # ==================== Reward system ====================
    reward_shaping: bool = True
    intermediate_reward_scale: float = 0.25  # scale applied to intermediate rewards
    win_reward: float = 20.0  # reward for winning
    lose_penalty: float = -20.0  # penalty for losing
    invalid_move_penalty: float = -50.0  # penalty for an illegal move

    # ==================== Exploration strategy ====================
    # Gomoku-specific exploration settings
    gomoku_epsilon_start: float = 0.5
    gomoku_epsilon_end: float = 0.05
    gomoku_epsilon_decay: int = 20000

    # Adaptive-epsilon settings
    adaptive_epsilon: bool = True
    epsilon_adapt_window: int = 100
    epsilon_adapt_threshold: float = 0.05
    epsilon_boost_factor: float = 1.5

    # ==================== Network architecture selection ====================
    use_conv_network: bool = True  # use a convolutional network (suited to board games)
    use_residual_connections: bool = True
    hidden_dim: int = 512

    # ==================== Buffer configuration ====================
    min_buffer_size: int = 500
    buffer_size: int = 50000

    # ==================== Advanced features ====================
    advanced_per: bool = True  # advanced prioritized experience replay
    per_priority_strategy: str = 'td_error'  # 'td_error', 'loss', 'mixed'
    use_noisy_networks: bool = False
    use_n_step_returns: bool = True

    # ==================== Expert game-record settings ====================
    use_expert_data: bool = True  # whether to use expert game records
    expert_data_path: str = 'train/expert/data'  # folder holding expert records
    expert_data_ratio: float = 0.7  # fraction of expert data in training
    expert_sampling_strategy: str = 'uniform'  # 'uniform', 'prioritized', 'curriculum'
    expert_reward_scale: float = 1.0  # reward scaling factor for expert data
    expert_data_augmentation: bool = True  # augment expert data (rotation / flip)

    def __post_init__(self) -> None:
        """Populate dict-valued fields left as None with their defaults."""

        # Threat-detection reward configuration
        if self.threat_detection_config is None:
            self.threat_detection_config = {
                'critical_defense_bonus': 2.0,  # extra reward multiplier for blocking an immediate win
                'high_defense_bonus': 1.5,  # extra reward multiplier for blocking an open four
                'medium_defense_bonus': 1.0,  # extra reward multiplier for blocking an open three
                'multiple_threat_bonus': 0.5,  # extra reward for defending multiple threats
                'enable_detailed_logging': False  # enable verbose threat-detection logging
            }

        # Threat-level definitions
        if self.THREAT_LEVELS is None:
            self.THREAT_LEVELS = {
                'CRITICAL': 160.0,  # opponent wins immediately (four in a row)
                'HIGH': 80.0,  # opponent forms an open four
                'MEDIUM': 40.0,  # opponent forms an open three
                'LOW': 0.5  # opponent forms an open two
            }

        # Win-rate reward table
        if self.win_rate_rewards_in_batch is None:
            self.win_rate_rewards_in_batch = {
                0.50: 50, 0.60: 150, 0.70: 200, 0.80: 250,
                0.90: 300, 0.95: 350, 0.98: 400
            }
