import random
from collections import deque, namedtuple

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

from .models import DQN

# A single experience-replay transition; next_state is None for terminal
# transitions (the training code masks those out when bootstrapping).
Transition = namedtuple(
    "Transition", ("state", "action", "next_state", "reward", "done")
)


class ReplayMemory:
    """Fixed-capacity FIFO buffer of experience-replay transitions."""

    def __init__(self, capacity):
        """Create an empty buffer that holds at most `capacity` transitions."""
        self.capacity = capacity  # kept so fullness can be monitored
        # A bounded deque evicts the oldest transition automatically once full.
        self.memory = deque(maxlen=capacity)

    def push(self, *args):
        """Store one transition built from the given field values."""
        item = Transition(*args)
        self.memory.append(item)

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)

    def is_full(self):
        """True once the buffer holds `capacity` transitions."""
        return not (len(self.memory) < self.capacity)


class DQNAgent:
    """DQN agent with epsilon-greedy exploration and a target network.

    Holds a trainable policy network and a periodically-synced target
    network (both `DQN` instances), an Adam optimizer, and a replay
    buffer. When CUDA is available, host-to-device transfers go through
    pinned memory with non-blocking copies.
    """

    def __init__(
        self,
        state_shape,
        n_actions,
        device="cuda" if torch.cuda.is_available() else "cpu",
    ):
        """Build the networks and training machinery.

        Args:
            state_shape: (C, H, W) of the stacked observation, where C is
                3 * stack_frames for stacked RGB frames.
            n_actions: size of the discrete action space.
            device: torch device string; defaults to CUDA when available.
        """
        self.state_shape = state_shape
        self.n_actions = n_actions
        self.device = device

        # Channels, height and width of the image input.
        c, h, w = state_shape

        # Policy net is trained; target net is a frozen copy used for
        # bootstrapped Q targets and synced via update_target_network().
        self.policy_net = DQN(h, w, n_actions, input_channels=c).to(device)
        self.target_net = DQN(h, w, n_actions, input_channels=c).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # the target network is never trained directly

        # Hyperparameters
        self.batch_size = 64        # larger batches improve GPU utilization
        self.gamma = 0.99           # discount factor
        self.eps_start = 1.0        # initial exploration rate
        self.eps_end = 0.1          # final exploration rate
        self.eps_decay = 5000       # exponential-decay time constant (in steps)
        self.target_update = 10000  # steps between target-network syncs

        # Optimizer
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=0.00025)

        # Experience replay buffer
        self.memory = ReplayMemory(2000)

        # Number of training-mode action selections so far (drives epsilon).
        self.steps_done = 0

        # Use pinned host memory to speed up CPU->GPU transfers.
        self.pin_memory = torch.cuda.is_available()

    def _to_device(self, tensor):
        """Move a CPU tensor to self.device, via pinned memory when enabled."""
        if self.pin_memory:
            return tensor.pin_memory().to(self.device, non_blocking=True)
        return tensor.to(self.device)

    def select_action(self, state, training=True):
        """Pick an action: epsilon-greedy in training, always greedy otherwise.

        Args:
            state: observation array shaped like state_shape.
            training: when True, increments steps_done and may explore.

        Returns:
            int action index in [0, n_actions).
        """
        sample = random.random()

        # Exponentially annealed epsilon from eps_start down to eps_end.
        eps_threshold = self.eps_end + (self.eps_start - self.eps_end) * np.exp(
            -1.0 * self.steps_done / self.eps_decay
        )

        if training:
            self.steps_done += 1

        if not training or sample > eps_threshold:
            # Greedy action from the policy network.
            with torch.no_grad():
                state_tensor = self._to_device(
                    torch.FloatTensor(state).unsqueeze(0)
                )
                return self.policy_net(state_tensor).max(1)[1].view(1, 1).item()
        else:
            # Uniform random exploration.
            return random.randrange(self.n_actions)

    def optimize_model(self):
        """Run one gradient step on a random minibatch from replay memory.

        Returns:
            The scalar Huber loss for the step, or None if the buffer does
            not yet contain batch_size transitions.
        """
        if len(self.memory) < self.batch_size:
            return

        transitions = self.memory.sample(self.batch_size)

        # Transpose a batch of Transitions into a Transition of batches.
        batch = Transition(*zip(*transitions))

        # Mask of entries whose next_state is non-terminal (None marks done).
        non_final_mask = torch.tensor(
            tuple(map(lambda s: s is not None, batch.next_state)),
            device=self.device,
            dtype=torch.bool,
        )

        # Batch tensors; _to_device handles pinned/async transfer uniformly.
        state_batch = self._to_device(torch.FloatTensor(np.array(batch.state)))
        action_batch = self._to_device(
            torch.LongTensor(batch.action).unsqueeze(1)
        )
        reward_batch = self._to_device(torch.FloatTensor(batch.reward))

        # Stack the surviving next-states in one shot instead of cat-ing
        # per-sample tensors; empty when every transition is terminal.
        non_final_states = [s for s in batch.next_state if s is not None]
        if non_final_states:
            non_final_next_states = self._to_device(
                torch.FloatTensor(np.array(non_final_states))
            )
        else:
            non_final_next_states = None

        # Q(s, a) for the actions that were actually taken.
        state_action_values = self.policy_net(state_batch).gather(1, action_batch)

        # max_a' Q_target(s', a'); stays zero for terminal states.
        next_state_values = torch.zeros(self.batch_size, device=self.device)
        if non_final_next_states is not None:
            with torch.no_grad():  # no gradients through the target network
                next_state_values[non_final_mask] = self.target_net(
                    non_final_next_states
                ).max(1)[0]

        # Bellman target: r + gamma * V(s').
        expected_state_action_values = (next_state_values * self.gamma) + reward_batch

        # Huber loss is robust to outlier TD errors.
        criterion = nn.SmoothL1Loss()
        loss = criterion(
            state_action_values, expected_state_action_values.unsqueeze(1)
        )

        # Optimize the policy network.
        self.optimizer.zero_grad()
        loss.backward()
        # Clamp each gradient element to [-1, 1] to prevent explosions.
        for param in self.policy_net.parameters():
            if param.grad is not None:
                param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

        return loss.item()

    def update_target_network(self):
        """Copy the policy network's weights into the target network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())

    def save(self, path):
        """Save policy weights plus optimizer/step state for resumable training."""
        torch.save(
            {
                "policy_net": self.policy_net.state_dict(),
                "steps_done": self.steps_done,
                "optimizer": self.optimizer.state_dict(),
            },
            path,
        )

    def load(self, path):
        """Load a checkpoint written by save() onto self.device.

        NOTE(review): torch.load unpickles arbitrary objects — only load
        checkpoint files from trusted sources.
        """
        checkpoint = torch.load(path, map_location=self.device)
        self.policy_net.load_state_dict(checkpoint["policy_net"])
        self.target_net.load_state_dict(checkpoint["policy_net"])
        # Restore training state when present (older checkpoints may lack it).
        if "steps_done" in checkpoint:
            self.steps_done = checkpoint["steps_done"]
        if "optimizer" in checkpoint:
            self.optimizer.load_state_dict(checkpoint["optimizer"])
