import torch
import torch.nn as nn
import torch.optim as optim
import math
import random
import os

class DQNAgent:
    """DQN agent with training-stability mechanisms.

    Includes gradient clipping, optional learning-rate scheduling, Huber
    loss, optional Double DQN targets, and soft or hard target-network
    updates.

    Args:
        config: Hyperparameter object; must expose the attributes read
            throughout this class (state_dim, action_dim, hidden_dim,
            learning_rate, weight_decay, lr_schedule, gamma, grad_clip_norm,
            max_q_value, double_dqn, use_huber_loss, lr_update_freq,
            epsilon_start/epsilon_end/epsilon_decay, and the target-update
            settings).
        network_class: ``nn.Module`` subclass constructed as
            ``network_class(state_dim, action_dim, hidden_dim)``.
    """

    def __init__(self, config, network_class):
        self.config = config
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Policy network (trained every step) and target network (slow copy).
        self.policy_net = network_class(
            config.state_dim,
            config.action_dim,
            config.hidden_dim
        ).to(self.device)

        self.target_net = network_class(
            config.state_dim,
            config.action_dim,
            config.hidden_dim
        ).to(self.device)

        # Start the target network as an exact copy of the policy network;
        # eval() disables train-only behavior (e.g. dropout) when it is used
        # to compute bootstrap targets.
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        # Adam with an enlarged eps for numerical stability, plus L2
        # regularization via weight_decay.
        self.optimizer = optim.Adam(
            self.policy_net.parameters(),
            lr=config.learning_rate,
            eps=1e-4,
            weight_decay=config.weight_decay
        )

        # Optional learning-rate scheduler; any other lr_schedule value
        # disables scheduling entirely.
        if config.lr_schedule == 'step':
            self.scheduler = optim.lr_scheduler.StepLR(
                self.optimizer,
                step_size=config.lr_step_size,
                gamma=config.lr_gamma
            )
        elif config.lr_schedule == 'exponential':
            self.scheduler = optim.lr_scheduler.ExponentialLR(
                self.optimizer,
                gamma=config.lr_gamma
            )
        elif config.lr_schedule == 'cosine':
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer,
                T_max=config.lr_cosine_steps
            )
        else:
            self.scheduler = None

        # Huber loss (SmoothL1) is less sensitive to outlier TD errors than
        # MSE. reduction='none' keeps per-sample losses so prioritized-replay
        # importance weights can be applied in update().
        if config.use_huber_loss:
            self.criterion = nn.SmoothL1Loss(reduction='none')
        else:
            self.criterion = nn.MSELoss(reduction='none')

        # Counters: actions selected so far, and target-network update calls.
        self.steps_done = 0
        self.target_update_counter = 0

        # Rolling training statistics, each bounded to the last 1000 entries.
        self.loss_history = []
        self.q_value_history = []
        self.grad_norm_history = []

    def select_action(self, state, epsilon):
        """Choose an action with an epsilon-greedy policy.

        Args:
            state: A single environment state (array-like of state_dim floats).
            epsilon: Exploration probability in [0, 1].

        Returns:
            int: Chosen action index in [0, action_dim).
        """
        if random.random() > epsilon:
            # Exploit: pick the action with the highest predicted Q value.
            with torch.no_grad():
                state_tensor = torch.as_tensor(
                    state, dtype=torch.float32, device=self.device
                ).unsqueeze(0)
                q_values = self.policy_net(state_tensor)
                action = q_values.max(1)[1].item()
        else:
            # Explore: uniform random action.
            action = random.randrange(self.config.action_dim)

        # steps_done counts every selection (greedy or random); it also
        # drives the LR-scheduler cadence in update().
        self.steps_done += 1
        return action

    def compute_epsilon(self, episode):
        """Return the exponentially decayed epsilon for the given episode.

        Decays from epsilon_start toward epsilon_end with time constant
        epsilon_decay (in episodes).
        """
        return self.config.epsilon_end + \
            (self.config.epsilon_start - self.config.epsilon_end) * \
            math.exp(-1. * episode / self.config.epsilon_decay)

    def set_learning_rate(self, lr):
        """Override the learning rate on every optimizer parameter group."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def get_learning_rate(self):
        """Return the current learning rate (first parameter group)."""
        return self.optimizer.param_groups[0]['lr']

    def update(self, batch, weights=None):
        """Run one gradient step on a sampled transition batch.

        Args:
            batch: Sequence whose first five items are
                (states, actions, rewards, next_states, dones), each an
                array-like of batch size; dones are 0/1 terminal flags.
            weights: Optional per-sample importance-sampling weights
                (prioritized replay), array-like of batch size.

        Returns:
            tuple: (loss value, mean current Q value, |TD error| as a
            (batch, 1) numpy array for priority updates).
        """
        states, actions, rewards, next_states, dones = batch[:5]

        # Move the batch onto the training device as float/long tensors.
        states = torch.as_tensor(states, dtype=torch.float32, device=self.device)
        actions = torch.as_tensor(actions, dtype=torch.long, device=self.device)
        rewards = torch.as_tensor(rewards, dtype=torch.float32, device=self.device)
        next_states = torch.as_tensor(next_states, dtype=torch.float32, device=self.device)
        dones = torch.as_tensor(dones, dtype=torch.float32, device=self.device)

        # Q(s, a) for the actions actually taken, shape (batch, 1).
        current_q_values = self.policy_net(states).gather(1, actions.unsqueeze(1))

        # Bootstrapped targets are computed without gradient tracking.
        with torch.no_grad():
            if self.config.double_dqn:
                # Double DQN: policy net selects the argmax action, target
                # net evaluates it (reduces overestimation bias).
                next_actions = self.policy_net(next_states).max(1)[1].unsqueeze(1)
                next_q_values = self.target_net(next_states).gather(1, next_actions)
            else:
                # Vanilla DQN: max over the target net's Q values.
                next_q_values = self.target_net(next_states).max(1)[0].unsqueeze(1)

            # Clamp bootstrap values so a diverging network cannot blow up
            # the targets.
            next_q_values = torch.clamp(next_q_values, -self.config.max_q_value, self.config.max_q_value)
            target_q_values = rewards.unsqueeze(1) + \
                              (1 - dones.unsqueeze(1)) * self.config.gamma * next_q_values

        # Signed TD errors; the absolute value is returned at the end.
        td_errors = target_q_values - current_q_values

        # Per-sample loss, optionally weighted for prioritized replay.
        if weights is not None:
            weights = torch.as_tensor(weights, dtype=torch.float32, device=self.device).unsqueeze(1)
            loss = (weights * self.criterion(current_q_values, target_q_values)).mean()
        else:
            loss = self.criterion(current_q_values, target_q_values).mean()

        # Clear stale gradients before backprop.
        self.optimizer.zero_grad()
        loss.backward()

        # Clip the global gradient norm to prevent gradient explosions.
        grad_norm = torch.nn.utils.clip_grad_norm_(
            self.policy_net.parameters(),
            self.config.grad_clip_norm
        )

        # float() works whether clip_grad_norm_ returns a tensor (current
        # PyTorch) or a plain float (older releases); .item() would not.
        self.grad_norm_history.append(float(grad_norm))
        if len(self.grad_norm_history) > 1000:
            self.grad_norm_history.pop(0)

        self.optimizer.step()

        # Step the LR scheduler on a fixed cadence. NOTE(review): the cadence
        # is keyed to steps_done, which select_action() increments (not this
        # method) — confirm that coupling is intended.
        if self.scheduler and self.steps_done % self.config.lr_update_freq == 0:
            self.scheduler.step()

        # Rolling monitoring statistics, bounded to 1000 entries. Both lists
        # are appended in lockstep, so one length check trims both.
        self.loss_history.append(loss.item())
        self.q_value_history.append(current_q_values.mean().item())
        if len(self.loss_history) > 1000:
            self.loss_history.pop(0)
            self.q_value_history.pop(0)

        # Absolute TD errors feed prioritized-replay priority updates.
        td_errors_abs = torch.abs(td_errors).detach().cpu().numpy()

        return loss.item(), current_q_values.mean().item(), td_errors_abs

    def update_target_network(self):
        """Update the target network: Polyak soft update or periodic hard copy."""
        self.target_update_counter += 1

        if self.config.soft_target_update:
            # Soft update every call: target <- tau*policy + (1-tau)*target.
            tau = self.config.target_update_tau
            for target_param, policy_param in zip(self.target_net.parameters(), self.policy_net.parameters()):
                target_param.data.copy_(tau * policy_param.data + (1.0 - tau) * target_param.data)
        else:
            # Hard update: full state-dict copy every target_update_freq calls.
            if self.target_update_counter % self.config.target_update_freq == 0:
                self.target_net.load_state_dict(self.policy_net.state_dict())

    def get_training_stats(self):
        """Return a dict of rolling training statistics.

        Aggregates are computed over the most recent 100 recorded samples of
        loss, Q value, and gradient norm, plus the current learning rate and
        total steps taken.
        """
        stats = {}
        if self.loss_history:
            recent = self.loss_history[-100:]
            stats['avg_loss'] = sum(recent) / len(recent)
            stats['max_loss'] = max(recent)
            stats['min_loss'] = min(recent)

        if self.q_value_history:
            recent = self.q_value_history[-100:]
            stats['avg_q_value'] = sum(recent) / len(recent)

        if self.grad_norm_history:
            recent = self.grad_norm_history[-100:]
            stats['avg_grad_norm'] = sum(recent) / len(recent)
            stats['max_grad_norm'] = max(recent)

        stats['learning_rate'] = self.get_learning_rate()
        stats['steps_done'] = self.steps_done

        return stats

    def save(self, path):
        """Save a full training checkpoint.

        Includes both networks, the optimizer, step counters, and the
        scheduler state when a scheduler is configured.
        """
        # Bug fix: os.makedirs('') raises FileNotFoundError, so only create
        # a directory when the path actually has a directory component.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        checkpoint = {
            'policy_net_state_dict': self.policy_net.state_dict(),
            'target_net_state_dict': self.target_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'steps_done': self.steps_done,
            'target_update_counter': self.target_update_counter,
        }

        # Persist scheduler state so LR scheduling resumes where it left off.
        if self.scheduler:
            checkpoint['scheduler_state_dict'] = self.scheduler.state_dict()

        torch.save(checkpoint, path)

    def load(self, path):
        """Restore a checkpoint produced by save().

        Missing counters default to 0 for compatibility with older
        checkpoints. NOTE: torch.load can deserialize arbitrary objects on
        older PyTorch releases — only load checkpoints from trusted sources.
        """
        checkpoint = torch.load(path, map_location=self.device)

        self.policy_net.load_state_dict(checkpoint['policy_net_state_dict'])
        self.target_net.load_state_dict(checkpoint['target_net_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.steps_done = checkpoint.get('steps_done', 0)
        self.target_update_counter = checkpoint.get('target_update_counter', 0)

        # Restore scheduler state only if one is configured and was saved.
        if self.scheduler and 'scheduler_state_dict' in checkpoint:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])