from __future__ import annotations

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from typing import Optional, Tuple


class MLPFeatureExtractor(nn.Module):
    """Fully-connected feature extractor.

    Flattens the map tensor, concatenates the per-agent feature vector and
    pushes the result through a stack of Linear + ReLU layers.

    Inputs:  map_matrix [B, H, W, C] (channels-last) or [B, C, H, W],
             agent_state [B, F]
    Output:  [B, hidden_layers[-1]]
    """

    def __init__(self,
                 state_shape: tuple,
                 agent_feature_size: int = 5,
                 hidden_layers: list = None):
        super(MLPFeatureExtractor, self).__init__()
        # Default built here rather than in the signature so no mutable
        # default argument is shared across instances.
        if hidden_layers is None:
            hidden_layers = [1280, 1600, 1600, 512, 256, 128]
        self.state_shape = state_shape
        height, width, channels = state_shape
        # Flattened map plus the agent feature vector feeds the first layer.
        in_features = height * width * channels + agent_feature_size

        modules = []
        for out_features in hidden_layers:
            modules.append(nn.Linear(in_features, out_features))
            modules.append(nn.ReLU())
            in_features = out_features

        self.network = nn.Sequential(*modules)
        # Width of the final Linear layer, exposed for downstream heads.
        self.output_size = hidden_layers[-1]

    def forward(self, map_matrix: torch.Tensor, agent_state: torch.Tensor) -> torch.Tensor:
        """Return the fused decision feature, shape [B, output_size]."""
        # Detect channels-last input by its trailing dimension and move the
        # channel axis in front of H and W.
        if map_matrix.dim() == 4 and map_matrix.shape[-1] == self.state_shape[2]:
            map_matrix = map_matrix.permute(0, 3, 1, 2)  # [B, H, W, C] -> [B, C, H, W]

        flat_map = map_matrix.flatten(start_dim=1)           # [B, H*W*C]
        fused = torch.cat([flat_map, agent_state], dim=1)    # [B, H*W*C + F]
        return self.network(fused)


class CNNFeatureExtractor(nn.Module):
    """Convolutional feature extractor.

    Runs the map through a stack of Conv2d + ReLU layers, flattens the
    result, concatenates the per-agent feature vector and applies a stack
    of Linear + ReLU fusion layers.

    Inputs:  map_matrix [B, H, W, C] (channels-last) or [B, C, H, W],
             agent_state [B, F]
    Output:  [B, hidden_layers[-1]]
    """

    def __init__(self, state_shape: tuple, agent_feature_size: int = 5,
                 cnn_channels=None,
                 kernel_sizes=None,
                 strides=None,
                 paddings=None,
                 hidden_layers=None):
        """
        Args:
            state_shape: (H, W, C) of the input map.
            agent_feature_size: length F of the per-agent feature vector.
            cnn_channels: output channels of each conv layer.
            kernel_sizes / strides / paddings: per-layer conv hyperparameters;
                each must have the same length as ``cnn_channels``.
            hidden_layers: fully-connected fusion layer sizes; the last entry
                is the extractor's output dimension.
        """
        super(CNNFeatureExtractor, self).__init__()

        # Defaults are created here (not in the signature) to avoid the
        # shared-mutable-default-argument pitfall.
        if hidden_layers is None:
            hidden_layers = [1024, 512, 256, 128]
        if paddings is None:
            paddings = [1, 1, 0, 1, 1]
        if strides is None:
            strides = [1, 1, 2, 2, 1]
        if kernel_sizes is None:
            kernel_sizes = [3, 3, 3, 3, 3]
        if cnn_channels is None:
            cnn_channels = [64, 128, 256, 128, 64]
        H, W, C = state_shape
        self.state_shape = state_shape
        self.agent_feature_size = agent_feature_size
        self.cnn_channels = cnn_channels

        # Build the convolutional stack.
        conv_layers = []
        in_channels = C

        for idx, out_channels in enumerate(self.cnn_channels):
            conv_layers.append(nn.Conv2d(in_channels, out_channels,
                                         kernel_size=kernel_sizes[idx],
                                         stride=strides[idx],
                                         padding=paddings[idx]))
            conv_layers.append(nn.ReLU())
            in_channels = out_channels

        self.conv_net = nn.Sequential(*conv_layers)

        # Infer the flattened conv output size with a dummy forward pass,
        # so arbitrary conv hyperparameters are supported without manual
        # size arithmetic.  (Leftover debug print removed.)
        with torch.no_grad():
            dummy_input = torch.zeros(1, C, H, W)
            dummy_output = self.conv_net(dummy_input)
            cnn_output_size = dummy_output.numel()

        # Build the fusion MLP over [conv features ; agent features].
        total_input_size = cnn_output_size + agent_feature_size
        layers = []
        input_size = total_input_size

        for hidden_size in hidden_layers:
            layers.append(nn.Linear(input_size, hidden_size))
            layers.append(nn.ReLU())
            input_size = hidden_size

        self.fc_net = nn.Sequential(*layers)
        # Width of the final Linear layer, exposed for downstream heads.
        self.output_size = hidden_layers[-1]

    def forward(self, map_matrix: torch.Tensor, agent_state: torch.Tensor):
        """Return the fused decision feature, shape [B, output_size]."""
        # Detect channels-last input by its trailing dimension and convert
        # to the CHW layout Conv2d expects.
        if map_matrix.dim() == 4 and map_matrix.shape[-1] == self.state_shape[2]:
            map_matrix = map_matrix.permute(0, 3, 1, 2)  # [B, H, W, C] -> [B, C, H, W]

        # Spatial features from the conv stack.
        cnn_features = self.conv_net(map_matrix)  # [B, C_out, H', W']

        cnn_flat = cnn_features.flatten(start_dim=1)  # [B, C_out * H' * W']

        # Fuse with the agent feature vector.
        combined = torch.cat([cnn_flat, agent_state], dim=1)  # [B, cnn_out + F]

        return self.fc_net(combined)


class DeepQNetwork(nn.Module):
    """
    Deep Q-Network.

    Supports vanilla DQN plus common variants: Double DQN targets,
    a Dueling (value/advantage) head, and prioritized experience replay
    via importance weights passed into :meth:`replay`.
    """

    def __init__(self,
                 state_shape: tuple,
                 action_size: int,
                 learning_rate=0.001,
                 gamma=0.99,
                 epsilon=1.0,
                 epsilon_min=0.01,
                 epsilon_decay=0.995,
                 batch_size=64,
                 target_update_freq=1000,
                 feature_extractor_type: str = 'cnn',
                 agent_feature_size: int = 5,  # e.g. health, attack, score, ...
                 use_double_dqn=True,
                 use_dueling=False,
                 # --- MLP parameters ---
                 mlp_hidden_layers=None,
                 # --- CNN parameters ---
                 cnn_channels=None,
                 cnn_kernel_sizes=None,
                 cnn_strides=None,
                 cnn_paddings=None,
                 cnn_fc_hidden_layers=None
                 ):
        """
        Initialize the DQN.

        Args:
            state_shape: (H, W, C) shape of the map observation.
            action_size: size of the discrete action space.
            learning_rate: Adam learning rate.
            gamma: discount factor.
            epsilon: initial exploration rate.
            epsilon_min: floor of the exploration rate.
            epsilon_decay: multiplicative decay applied per training step.
            batch_size: training batch size (stored for external use).
            target_update_freq: target-network sync period (stored for
                external use; syncing itself is done by the caller).
            feature_extractor_type: 'mlp' or 'cnn'.
            agent_feature_size: length of the per-agent feature vector.
            use_double_dqn: use Double DQN targets in :meth:`replay`.
            use_dueling: use a Dueling (value/advantage) output head.
            mlp_hidden_layers: hidden sizes for the MLP extractor.
            cnn_channels / cnn_kernel_sizes / cnn_strides / cnn_paddings:
                conv-stack hyperparameters for the CNN extractor.
            cnn_fc_hidden_layers: fusion-layer sizes for the CNN extractor.

        Raises:
            ValueError: if ``feature_extractor_type`` is unknown.
        """
        super(DeepQNetwork, self).__init__()

        # Defaults built here to avoid mutable default arguments.
        if cnn_fc_hidden_layers is None:
            cnn_fc_hidden_layers = [512, 256, 128]
        if cnn_paddings is None:
            cnn_paddings = [1, 1, 0, 1, 1]
        if cnn_strides is None:
            cnn_strides = [1, 1, 2, 2, 1]
        if cnn_kernel_sizes is None:
            cnn_kernel_sizes = [3, 3, 3, 3, 3]
        if cnn_channels is None:
            cnn_channels = [64, 128, 256, 128, 64]
        if mlp_hidden_layers is None:
            mlp_hidden_layers = [1280, 1600, 1600, 512, 256, 128]
        self.state_shape = state_shape
        self.action_size = action_size
        self.lr = learning_rate
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay
        self.batch_size = batch_size
        self.target_update_freq = target_update_freq
        self.use_double_dqn = use_double_dqn
        self.use_dueling = use_dueling
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Feature-extractor hyperparameters, kept so get_config() can
        # report the full constructor configuration.
        self.agent_feature_size = agent_feature_size
        self.mlp_hidden_layers = mlp_hidden_layers
        self.cnn_channels = cnn_channels
        self.cnn_kernel_sizes = cnn_kernel_sizes
        self.cnn_strides = cnn_strides
        self.cnn_paddings = cnn_paddings
        self.cnn_fc_hidden_layers = cnn_fc_hidden_layers

        self.feature_extractor = None
        # Stays None when the Dueling head is used.
        self.norm_output_layer = None
        self.feature_extractor_type = feature_extractor_type
        # ========== Build the feature extractor ==========
        if self.feature_extractor_type == 'mlp':
            self.feature_extractor = MLPFeatureExtractor(
                state_shape=state_shape,
                agent_feature_size=agent_feature_size,
                hidden_layers=mlp_hidden_layers)
        elif self.feature_extractor_type == 'cnn':
            self.feature_extractor = CNNFeatureExtractor(
                state_shape=state_shape,
                agent_feature_size=agent_feature_size,
                cnn_channels=cnn_channels,
                kernel_sizes=cnn_kernel_sizes,
                strides=cnn_strides,
                paddings=cnn_paddings,
                hidden_layers=cnn_fc_hidden_layers)
        else:
            raise ValueError(f"Unsupported feature_extractor_type: {feature_extractor_type}")

        self.feature_dim = self.feature_extractor.output_size

        # ========== Build the Q-value output head ==========
        if self.use_dueling:
            # Dueling DQN: separate state-value and advantage streams.
            self.value_stream = nn.Sequential(
                nn.Linear(self.feature_dim, 64),
                nn.ReLU(),
                nn.Linear(64, 1))
            self.advantage_stream = nn.Sequential(
                nn.Linear(self.feature_dim, 64),
                nn.ReLU(),
                nn.Linear(64, self.action_size))
        else:  # Plain head: map features directly to action Q-values.
            self.norm_output_layer = nn.Linear(self.feature_dim, self.action_size)

        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)

        # NOTE(review): unused — replay() computes a weighted MSE manually
        # to support PER weights; kept for backward compatibility.
        self.loss_fn = nn.MSELoss()

        self.train_step = 0
        print(f"[DQN] 初始化成功：{feature_extractor_type.upper()} + \
              {'Dueling' if use_dueling else 'Normal'} | Device: {self.device}")

    def forward(self, map_matrix: torch.Tensor, agent_state: torch.Tensor):
        """Forward pass: extract features, then output action Q-values.

        Args:
            map_matrix: [B, H, W, C] or [B, C, H, W] map observation.
            agent_state: [B, F] per-agent feature vector.

        Returns:
            Q-values of shape [B, action_size].
        """
        features = self.feature_extractor(map_matrix, agent_state)  # [B, feature_dim]

        if self.use_dueling:
            value = self.value_stream(features)  # [B, 1]
            advantage = self.advantage_stream(features)  # [B, A]
            # Dueling aggregation: Q = V + (A - mean(A)); subtracting the
            # mean keeps V and A identifiable.
            q_values = value + advantage - advantage.mean(dim=1, keepdim=True)
            return q_values
        else:
            return self.norm_output_layer(features)

    def replay(self,
               target_network: DeepQNetwork,
               states_old: torch.Tensor,  # [B, H, W, C] or [B, C, H, W]
               states_agent: torch.Tensor,  # [B, F]
               actions: torch.Tensor,  # [B]
               rewards: torch.Tensor,  # [B] — already N-step accumulated!
               next_states: torch.Tensor,  # [B, H, W, C]
               next_states_agent: torch.Tensor,  # [B, F]
               weights: Optional[torch.Tensor] = None,  # [B] — PER weights
               n_steps=3
               ) -> Tuple[float, np.ndarray]:
        """
        Run one training step on a batch of (N-step) transitions.

        Args:
            target_network: frozen copy of this network used for targets.
            weights: optional PER importance-sampling weights.
            n_steps: N of the N-step return; ``rewards`` must already be
                the N-step accumulated reward.

        Returns:
            (loss_value, td_errors) where td_errors is a [B] numpy array
            for updating PER priorities.
        """
        # Q(s, a) for the actions actually taken.
        current_q_values = self.forward(states_old, states_agent).gather(1, actions.unsqueeze(1)).squeeze(1)  # [B]

        # Target Q-values (Double DQN or standard DQN).
        with torch.no_grad():
            if self.use_double_dqn:
                # Double DQN: online network selects, target network evaluates.
                next_actions = self.forward(next_states, next_states_agent).argmax(1)  # [B]
                next_q_values = target_network.forward(next_states, next_states_agent).gather(1, next_actions.unsqueeze(
                    1)).squeeze(1)  # [B]
            else:
                # Standard DQN: target network both selects and evaluates.
                next_q_values = target_network.forward(next_states, next_states_agent).max(1)[0]  # [B]

            # rewards is already the N-step return, so the bootstrap term
            # is discounted by gamma^n_steps.
            target_q_values = rewards + (self.gamma ** n_steps) * next_q_values  # [B]

        # TD errors, used by the caller to update PER priorities.
        td_errors = (current_q_values - target_q_values).detach().cpu().numpy()  # [B]

        # Per-sample squared error (manual so PER weights can be applied).
        loss = (current_q_values - target_q_values).pow(2)  # [B]
        if weights is not None:
            loss *= weights  # PER importance-sampling correction
        loss = loss.mean()

        # Backpropagate with gradient clipping for stability.
        self.optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.parameters(), 1.0)
        self.optimizer.step()

        # Decay exploration rate (TODO: consider moving to ModelManager).
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        self.train_step += 1

        return loss.item(), td_errors

    def save(self, filepath):
        """Save model weights, optimizer state and training counters."""
        torch.save({
            'model_state_dict': self.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'epsilon': self.epsilon,
            'train_step': self.train_step
        }, filepath)

    def load(self, filepath):
        """Load a checkpoint written by :meth:`save`.

        ``map_location`` remaps tensors onto this network's device so a
        checkpoint saved on GPU loads on a CPU-only machine (and vice
        versa) instead of raising.
        """
        checkpoint = torch.load(filepath, map_location=self.device)
        self.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.epsilon = checkpoint['epsilon']
        self.train_step = checkpoint['train_step']

    def get_config(self):
        """Return the model configuration as a plain dict.

        Fixed: the previous version referenced ``self.hidden_layers``,
        ``self.memory_size`` and ``self.use_per``, which were never set,
        so it always raised AttributeError. Only attributes that actually
        exist are reported now.
        """
        return {
            'state_shape': self.state_shape,
            'action_size': self.action_size,
            'feature_extractor_type': self.feature_extractor_type,
            'agent_feature_size': self.agent_feature_size,
            'mlp_hidden_layers': self.mlp_hidden_layers,
            'cnn_channels': self.cnn_channels,
            'cnn_kernel_sizes': self.cnn_kernel_sizes,
            'cnn_strides': self.cnn_strides,
            'cnn_paddings': self.cnn_paddings,
            'cnn_fc_hidden_layers': self.cnn_fc_hidden_layers,
            'learning_rate': self.lr,
            'gamma': self.gamma,
            'epsilon': self.epsilon,
            'epsilon_min': self.epsilon_min,
            'epsilon_decay': self.epsilon_decay,
            'batch_size': self.batch_size,
            'target_update_freq': self.target_update_freq,
            'use_double_dqn': self.use_double_dqn,
            'use_dueling': self.use_dueling
        }
