import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import random
from typing import List, Tuple, Dict, Any, Optional
import math

from agent.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer


class NoisyLinear(nn.Module):
    """Linear layer with factorized Gaussian parameter noise (NoisyNet).

    Used in place of epsilon-greedy exploration: learnable sigma parameters
    scale resampled noise during training, while evaluation uses only the
    mean weights.
    """

    def __init__(self, in_features, out_features, std_init=0.5):
        super(NoisyLinear, self).__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init

        # Learnable mean/sigma for the weight, plus a non-learnable noise buffer.
        self.weight_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.FloatTensor(out_features, in_features))

        # The same triple for the bias.
        self.bias_mu = nn.Parameter(torch.FloatTensor(out_features))
        self.bias_sigma = nn.Parameter(torch.FloatTensor(out_features))
        self.register_buffer('bias_epsilon', torch.FloatTensor(out_features))

        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self):
        """Initialize means uniformly and sigmas to a scaled constant."""
        bound = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))

        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))

    def _scale_noise(self, size):
        """Draw noise transformed as f(x) = sign(x) * sqrt(|x|)."""
        noise = torch.randn(size)
        return noise.sign() * noise.abs().sqrt()

    def reset_noise(self):
        """Resample the factorized noise buffers (outer product for weights)."""
        eps_in = self._scale_noise(self.in_features)
        eps_out = self._scale_noise(self.out_features)

        self.weight_epsilon.copy_(eps_out.outer(eps_in))
        self.bias_epsilon.copy_(eps_out)

    def forward(self, x):
        """Apply the affine transform: noisy in training, mean-only in eval."""
        if not self.training:
            return F.linear(x, self.weight_mu, self.bias_mu)

        noisy_weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
        noisy_bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        return F.linear(x, noisy_weight, noisy_bias)


class DuelingDQN(nn.Module):
    """Dueling DQN network.

    Decomposes the Q-value into a state value V(s) and advantages A(s, a),
    recombined as Q(s,a) = V(s) + A(s,a) - mean_a' A(s,a').
    """

    def __init__(self, state_dim: int, action_dim: int, hidden_dim: int = 128, use_noisy: bool = False):
        """Build the network.

        Args:
            state_dim: Dimension of the state vector.
            action_dim: Number of discrete actions.
            hidden_dim: Width of the hidden layers.
            use_noisy: Whether to use NoisyLinear layers in the two streams.
        """
        super(DuelingDQN, self).__init__()

        self.use_noisy = use_noisy

        # Shared feature extractor (always plain linear layers).
        self.feature_layer = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU()
        )

        # Both streams use the same layer class; NoisyLinear is only
        # referenced when actually requested.
        if use_noisy:
            layer_cls = NoisyLinear
        else:
            layer_cls = nn.Linear
        half = hidden_dim // 2

        # State-value stream: features -> scalar V(s).
        self.value_stream = nn.Sequential(
            layer_cls(hidden_dim, half),
            nn.ReLU(),
            layer_cls(half, 1)
        )

        # Advantage stream: features -> A(s, a) per action.
        self.advantage_stream = nn.Sequential(
            layer_cls(hidden_dim, half),
            nn.ReLU(),
            layer_cls(half, action_dim)
        )

    def forward(self, state: torch.Tensor) -> torch.Tensor:
        """Compute Q-values for a batch of states.

        Args:
            state: Batch of state vectors, shape (batch, state_dim).

        Returns:
            torch.Tensor: Q-values, shape (batch, action_dim).
        """
        features = self.feature_layer(state)

        value = self.value_stream(features)
        advantages = self.advantage_stream(features)

        # Q(s,a) = V(s) + A(s,a) - mean(A(s,a')); the mean-subtraction makes
        # the V/A decomposition identifiable.
        return value + advantages - advantages.mean(dim=1, keepdim=True)

    def reset_noise(self):
        """Resample the noise in every NoisyLinear layer (no-op otherwise)."""
        if not self.use_noisy:
            return

        for module in self.modules():
            if isinstance(module, NoisyLinear):
                module.reset_noise()


class DQNAgent:
    """
    DQN agent for path planning.

    Supports several independently-togglable extensions: Double DQN targets,
    prioritized experience replay, Noisy Networks (replacing epsilon-greedy
    exploration) and n-step TD learning.
    """
    def __init__(self, state_dim: int, action_dim: int, config: Dict[str, Any], use_double_dqn: bool = True, 
                 use_prioritized: bool = True, use_noisy: bool = False, n_step: int = 1, device: str = None):
        """
        Initialize the DQN agent.

        Args:
            state_dim: Dimension of the state vector.
            action_dim: Number of discrete actions.
            config: Hyper-parameter dict (gamma, epsilon_start/end/decay,
                learning_rate, batch_size, target_update, memory_size).
            use_double_dqn: Use Double DQN target computation.
            use_prioritized: Use prioritized experience replay.
            use_noisy: Use Noisy Networks instead of epsilon-greedy.
            n_step: Number of steps for n-step TD learning.
            device: Torch device string; auto-detects CUDA when None.
        """
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.config = config
        self.use_double_dqn = use_double_dqn
        self.use_prioritized = use_prioritized
        self.use_noisy = use_noisy
        self.n_step = n_step

        # Device selection: explicit string wins, otherwise prefer CUDA.
        if device is None:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device(device)

        # Hyper-parameters with defaults.
        self.gamma = config.get('gamma', 0.99)
        self.epsilon = config.get('epsilon_start', 1.0)
        self.epsilon_end = config.get('epsilon_end', 0.01)
        self.epsilon_decay = config.get('epsilon_decay', 0.995)
        self.learning_rate = config.get('learning_rate', 0.001)
        self.batch_size = config.get('batch_size', 64)
        self.target_update = config.get('target_update', 10)

        # Online (policy) and target networks; target starts as a copy.
        self.policy_net = DuelingDQN(state_dim, action_dim, hidden_dim=128, use_noisy=use_noisy).to(self.device)
        self.target_net = DuelingDQN(state_dim, action_dim, hidden_dim=128, use_noisy=use_noisy).to(self.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # target network is never trained directly

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=self.learning_rate)

        # Experience replay buffer.
        if use_prioritized:
            self.memory = PrioritizedReplayBuffer(config.get('memory_size', 10000))
        else:
            self.memory = ReplayBuffer(config.get('memory_size', 10000))

        # Sliding window of raw transitions used to build n-step transitions.
        self.n_step_buffer = []

        self.steps_done = 0
        self.episode = 0

    def _greedy_action(self, state: np.ndarray) -> int:
        """Return argmax_a Q(state, a) from the policy network (no grad)."""
        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
            return self.policy_net(state_tensor).max(1)[1].item()

    def select_action(self, state: np.ndarray, eval_mode: bool = False) -> int:
        """
        Select an action for the given state.

        Args:
            state: Current state vector.
            eval_mode: If True, act greedily with no exploration bookkeeping.

        Returns:
            int: Chosen action index.
        """
        if self.use_noisy:
            # Noisy Networks explore through parameter noise; no epsilon-greedy.
            if not eval_mode:
                # Fix: count training steps here too, consistent with the
                # epsilon-greedy branch below.
                self.steps_done += 1
            return self._greedy_action(state)

        if eval_mode:
            # Evaluation: pure greedy policy.
            return self._greedy_action(state)

        # Training: epsilon-greedy exploration.
        self.steps_done += 1
        if random.random() < self.epsilon:
            return random.randrange(self.action_dim)
        return self._greedy_action(state)

    def update_epsilon(self):
        """Decay the exploration rate (no-op when using Noisy Networks)."""
        if not self.use_noisy:
            self.epsilon = max(self.epsilon_end, self.epsilon * self.epsilon_decay)

    def update_target_network(self):
        """Hard-copy the policy network weights into the target network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())

    def _get_n_step_info(self, n_step_buffer, gamma):
        """Collapse the buffered window into one n-step transition.

        Returns (state, action, n_step_return, last_next_state, last_done),
        truncating the return at the first terminal transition.
        """
        state, action, reward, next_state, done = n_step_buffer[0]

        for i in range(1, len(n_step_buffer)):
            r, next_s, d = n_step_buffer[i][2:5]
            reward += gamma ** i * r
            next_state, done = next_s, d
            if d:
                break

        return state, action, reward, next_state, done

    def store_transition(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, done: bool):
        """
        Store one environment transition.

        With n_step > 1 the raw transition first enters a sliding window and
        only a collapsed n-step transition is pushed to the replay buffer.

        Args:
            state: Current state.
            action: Action taken.
            reward: Reward received.
            next_state: Next state.
            done: Whether the episode terminated.
        """
        if self.n_step > 1:
            self.n_step_buffer.append((state, action, reward, next_state, done))

            # Wait until the window is full before emitting anything.
            if len(self.n_step_buffer) < self.n_step:
                return

            # Collapse the window into a single n-step transition.
            state, action, reward, next_state, done = self._get_n_step_info(self.n_step_buffer, self.gamma)

            # Slide the window forward.
            self.n_step_buffer.pop(0)

            # Episode boundary: drop the remaining partial window.
            # NOTE(review): transitions still in the window at episode end are
            # discarded rather than flushed — TODO confirm this is intended.
            if done:
                self.n_step_buffer = []

        self.memory.push(state, action, reward, next_state, done)

    def learn(self, return_q_value=False) -> Optional[float]:
        """
        Perform one optimization step from sampled experience.

        Args:
            return_q_value: Whether to also return the batch-mean Q value.

        Returns:
            Optional[Tuple[float, float]]: (loss, mean Q) if return_q_value=True
            Optional[float]: loss if return_q_value=False
            (None values when the buffer has fewer than batch_size samples.)
        """
        if len(self.memory) < self.batch_size:
            return (None, None) if return_q_value else None

        if self.use_prioritized:
            # Prioritized replay also yields sample indices and IS weights.
            transitions, indices, weights = self.memory.sample(self.batch_size)
            weights_tensor = torch.FloatTensor(weights).to(self.device)
        else:
            transitions = self.memory.sample(self.batch_size)
            weights_tensor = None

        # Stack through np.asarray first: building a tensor from a Python
        # list of ndarrays is far slower than from one contiguous array.
        batch_state = torch.as_tensor(np.asarray([t[0] for t in transitions]), dtype=torch.float32).to(self.device)
        batch_action = torch.LongTensor([[t[1]] for t in transitions]).to(self.device)
        batch_reward = torch.FloatTensor([[t[2]] for t in transitions]).to(self.device)
        batch_next_state = torch.as_tensor(np.asarray([t[3] for t in transitions]), dtype=torch.float32).to(self.device)
        # Continuation mask: 0 where the episode ended, 1 otherwise.
        batch_done = torch.FloatTensor([[1 - int(t[4])] for t in transitions]).to(self.device)

        # Q(s, a) for the actions actually taken.
        current_q_values = self.policy_net(batch_state).gather(1, batch_action)

        # Fix: build the TD target without gradient tracking. Previously
        # gradients flowed into target_net, whose .grad buffers were never
        # read or zeroed by the optimizer — pure wasted memory and compute.
        with torch.no_grad():
            if self.use_double_dqn:
                # Double DQN: policy net selects the action, target net scores it.
                next_action = self.policy_net(batch_next_state).max(1)[1].unsqueeze(1)
                next_q_values = self.target_net(batch_next_state).gather(1, next_action)
            else:
                # Vanilla DQN: max Q from the target network.
                next_q_values = self.target_net(batch_next_state).max(1)[0].unsqueeze(1)

            # n-step targets discount by gamma**n (gamma**1 == gamma).
            expected_q_values = batch_reward + batch_done * (self.gamma ** self.n_step) * next_q_values

        if self.use_prioritized:
            # Absolute TD errors drive the new priorities.
            td_errors = torch.abs(current_q_values - expected_q_values).detach().cpu().numpy()

            # Importance-sampling-weighted Huber loss.
            loss = F.smooth_l1_loss(current_q_values, expected_q_values, reduction='none')
            loss = (loss * weights_tensor).mean()

            # Small constant keeps every priority strictly positive.
            self.memory.update_priorities(indices, td_errors + 1e-6)
        else:
            loss = F.smooth_l1_loss(current_q_values, expected_q_values)

        self.optimizer.zero_grad()
        loss.backward()
        # Gradient clipping guards against exploding gradients.
        torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
        self.optimizer.step()

        # Resample parameter noise after every update.
        if self.use_noisy:
            self.policy_net.reset_noise()
            self.target_net.reset_noise()

        avg_q_value = current_q_values.mean().item() if return_q_value else None

        if return_q_value:
            return loss.item(), avg_q_value
        else:
            return loss.item()

    def save_model(self, path: str):
        """
        Save networks, optimizer and training counters to a checkpoint.

        Args:
            path: Destination file path.
        """
        torch.save({
            'policy_net': self.policy_net.state_dict(),
            'target_net': self.target_net.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'epsilon': self.epsilon,
            'steps_done': self.steps_done,
            'episode': self.episode,
            'use_noisy': self.use_noisy,
            'n_step': self.n_step
        }, path)

    def load_model(self, path: str):
        """
        Restore networks, optimizer and training counters from a checkpoint.

        Args:
            path: Checkpoint file path.
        """
        checkpoint = torch.load(path, map_location=self.device)
        self.policy_net.load_state_dict(checkpoint['policy_net'])
        self.target_net.load_state_dict(checkpoint['target_net'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.epsilon = checkpoint['epsilon']
        self.steps_done = checkpoint['steps_done']
        self.episode = checkpoint['episode']

        # Older checkpoints may predate these fields; load when present.
        if 'use_noisy' in checkpoint:
            self.use_noisy = checkpoint['use_noisy']
        if 'n_step' in checkpoint:
            self.n_step = checkpoint['n_step']