# DQN及其改进算法Double DQN
# 状态连续：如Box([-1.2  -0.07], [0.6  0.07], (2,), float32) numpy.ndarray
# 动作离散：如Discrete(3) int
import os
import torch
import random
import collections
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import gymnasium as gym
from typing import Tuple
import torch.nn.functional as F
import matplotlib.pyplot as plt
from gymnasium.wrappers import RecordVideo
from config import (GAME_NAME, NUM_EPISODES, VIDEO_FREQ, HIDDEN_DIM, DEVICE, LEARNING_RATE, TARGET_UPDATE, TAU, EPSILON,
                    FINAL_EPSILON, EPSILON_DECAY, DISCOUNT_FACTOR, DQN_TYPE, BUFFER_SIZE, MINIMAL_SIZE, BATCH_SIZE, GAME_UPPER_STEP, REWARD_SCALE)

def make_env(name: str, render_video: bool, video_freq: int, max_steps: int = GAME_UPPER_STEP):
    """Create a gymnasium environment, optionally wrapped to record videos.

    Args:
        name: gymnasium environment id (e.g. "MountainCar-v0").
        render_video: when True, wrap the env in RecordVideo so every
            ``video_freq``-th episode is saved under "{name}-training/".
        video_freq: episode interval between recorded videos.
        max_steps: per-episode step cap (TimeLimit truncation).

    Returns:
        The (possibly wrapped) environment.
    """
    # Bug fix: RecordVideo needs frames, so the env must be created with
    # render_mode="rgb_array" when recording; plain training renders nothing.
    render_mode = "rgb_array" if render_video else None
    env = gym.make(name, render_mode=render_mode, max_episode_steps=max_steps)
    if render_video:
        env = RecordVideo(
            env,
            video_folder=name + "-training",
            name_prefix="training",
            episode_trigger=lambda episode: episode % video_freq == 0,
        )
    return env

class Qnet(nn.Module):
    """MLP Q-network: maps a batch of states to one Q-value per action."""

    def __init__(self, state_dim: int, hidden_dim: int, action_dim: int):
        super().__init__()
        # Two ReLU hidden layers, linear output head (raw Q-values).
        # Built as a flat nn.Sequential so state_dict keys stay "net.<i>.*".
        widths = [state_dim, hidden_dim, hidden_dim]
        layers = []
        for in_dim, out_dim in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(in_dim, out_dim))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(hidden_dim, action_dim))
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """x: [B, state_dim] -> Q-values: [B, action_dim]."""
        return self.net(x)

class DQN_Agent:
    """DQN / Double DQN agent for a continuous state vector and a discrete
    action space.

    A main Q-network is trained by temporal-difference learning; a target
    Q-network supplies the bootstrap targets and is soft-updated (Polyak
    averaging with rate ``tau``) after every gradient step. With
    ``dqn_type='DoubleDQN'`` the greedy action for the target is chosen by
    the main network and evaluated by the target network, which reduces the
    overestimation bias of vanilla DQN.
    """
    def __init__(self,
        state_dim: int,
        hidden_dim: int,
        action_dim: int,
        device: torch.device,
        learning_rate: float,
        target_update: int,
        tau: float,
        epsilon: float,
        final_epsilon: float,
        epsilon_decay: float,
        discount_factor: float,
        dqn_type: str = 'DoubleDQN', # 'VanillaDQN' or 'DoubleDQN'
    ):
        self.state_dim = state_dim
        self.hidden_dim = hidden_dim
        self.action_dim = action_dim
        self.device = device
        self.learning_rate = learning_rate      # optimizer learning rate
        self.target_update = target_update      # hard-update period (kept for compatibility; soft updates are used)
        self.tau = tau                          # Polyak rate for target-network soft updates
        self.epsilon = epsilon                  # current epsilon-greedy exploration rate
        self.final_epsilon = final_epsilon      # lower bound for epsilon
        self.epsilon_decay = epsilon_decay      # amount subtracted from epsilon per episode
        self.discount_factor = discount_factor  # reward discount factor (gamma)
        self.dqn_type = dqn_type

        self.q_net        = Qnet(state_dim, hidden_dim, action_dim).to(device)
        self.target_q_net = Qnet(state_dim, hidden_dim, action_dim).to(device)

        # Only q_net's parameters are optimized, so gradients can never reach
        # target_q_net even without detaching its outputs.
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=learning_rate)

        self.count = 0  # number of gradient steps taken on the main network

    def take_action(self, state: np.ndarray) -> int:
        """Epsilon-greedy action selection for a single (unbatched) state.

        Args:
            state: observation of shape (state_dim,).

        Returns:
            Chosen action index as a plain Python int.
        """
        if np.random.random() < self.epsilon:
            # int() so the random branch also returns a Python int, matching
            # the greedy branch (np.random.randint yields a numpy scalar).
            return int(np.random.randint(self.action_dim))
        # Pure inference: no_grad avoids building an autograd graph here.
        with torch.no_grad():
            # Qnet expects a batch dimension, hence the expand_dims.
            state_t = torch.tensor(np.expand_dims(state, 0),
                                   dtype=torch.float32, device=self.device)  # [1, state_dim]
            # argmax over the action dimension; .item() -> Python int
            return int(self.q_net(state_t).argmax(dim=1).item())

    def update(self, batch: Tuple[np.ndarray, ...]):
        """One TD-learning gradient step on a sampled minibatch.

        Args:
            batch: tuple ``(states, actions, rewards, next_states, dones)``
                of NumPy arrays as produced by ``ReplayBuffer.sample``.
        """
        states, actions, rewards, next_states, dones = batch

        states      = torch.tensor(states,      dtype=torch.float32, device=self.device)              # [B, state_dim]
        # .gather requires int64 (long) indices
        actions     = torch.tensor(actions,     dtype=torch.long,    device=self.device).unsqueeze(1) # [B, 1]
        rewards     = torch.tensor(rewards,     dtype=torch.float32, device=self.device).unsqueeze(1) # [B, 1]
        next_states = torch.tensor(next_states, dtype=torch.float32, device=self.device)              # [B, state_dim]
        dones       = torch.tensor(dones,       dtype=torch.float32, device=self.device).unsqueeze(1) # [B, 1]

        # Q(s, a) for the actions actually taken: [B, 1]
        q_values = self.q_net(states).gather(1, actions)

        # Targets are constants w.r.t. the optimized parameters; no_grad
        # skips graph construction (cheaper, and makes the intent explicit).
        with torch.no_grad():
            if self.dqn_type == 'DoubleDQN':
                # Double DQN: main net selects the argmax action,
                # target net evaluates it.
                max_action = self.q_net(next_states).max(1)[1].unsqueeze(1)               # [B, 1]
                max_next_q_values = self.target_q_net(next_states).gather(1, max_action)  # [B, 1]
            else:
                # Vanilla DQN: target net both selects and evaluates.
                max_next_q_values = self.target_q_net(next_states).max(1)[0].unsqueeze(1) # [B, 1]
            # Terminal transitions (dones == 1) contribute only the reward.
            q_targets = rewards + self.discount_factor * max_next_q_values * (1 - dones)  # [B, 1]

        dqn_loss = F.mse_loss(q_values, q_targets, reduction='mean')

        self.optimizer.zero_grad()
        dqn_loss.backward()
        # Clip the gradient norm to guard against exploding TD errors.
        torch.nn.utils.clip_grad_norm_(self.q_net.parameters(), max_norm=1.0)
        self.optimizer.step()
        self.count += 1

        # Polyak soft update of the target network after every gradient step.
        for target_param, param in zip(self.target_q_net.parameters(), self.q_net.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def decay_epsilon(self):
        """Linearly reduce the exploration rate, clamped at final_epsilon."""
        self.epsilon = max(self.final_epsilon, self.epsilon - self.epsilon_decay)

    def save_model(self, path: str):
        """Save the main network, optimizer state and training progress."""
        torch.save({
            'q_net_state_dict': self.q_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'epsilon': self.epsilon,
            'count': self.count
        }, path)
        print(f"Model saved to {path}")

    def load_model(self, path: str):
        """Load a checkpoint and synchronize the target network.

        Returns:
            True if a checkpoint was loaded, False if none exists at path.
        """
        if not os.path.exists(path):
            print(f"No saved model found at {path}, starting from scratch.")
            return False

        checkpoint = torch.load(path, map_location=self.device, weights_only=True)
        self.q_net.load_state_dict(checkpoint['q_net_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # Older checkpoints may lack these keys; keep current values then.
        self.epsilon = checkpoint.get('epsilon', self.epsilon)
        self.count = checkpoint.get('count', self.count)

        self.target_q_net.load_state_dict(self.q_net.state_dict())

        print(f"Model loaded from {path} and target network synchronized.")
        return True

class ReplayBuffer:
    """Fixed-capacity FIFO buffer of transitions for experience replay."""

    def __init__(self, capacity: int):
        # deque with maxlen silently evicts the oldest transition when full
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, done: bool) -> None:
        """Append one transition; values are stored as-is (no dtype work)."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Draw ``batch_size`` transitions uniformly without replacement.

        Returns:
            Tuple of NumPy arrays: states (B, state_dim), actions (B,),
            rewards (B,), next_states (B, state_dim), dones (B,).

        Raises:
            ValueError: if the buffer holds fewer than ``batch_size`` items.
        """
        if len(self.buffer) < batch_size:
            raise ValueError("Sample size larger than current buffer size.")

        picked = random.sample(self.buffer, batch_size)
        # zip(*picked) transposes the list of 5-tuples into 5 columns:
        # one tuple each of states, actions, rewards, next_states, dones.
        s_col, a_col, r_col, ns_col, d_col = zip(*picked)

        return (
            np.stack(s_col),   # (B, state_dim)
            np.array(a_col),   # (B,) ints
            np.array(r_col),   # (B,) floats
            np.stack(ns_col),  # (B, state_dim)
            np.array(d_col),   # (B,) bools
        )

    def __len__(self) -> int:
        """Number of transitions currently stored (enables len(buffer))."""
        return len(self.buffer)

def train_dqn(env: gym.Env, agent: DQN_Agent, replay_buffer: ReplayBuffer, num_episodes: int, minimal_size: int, batch_size: int, plot_path: str = None):
    """Run the DQN training loop and return per-episode returns.

    Args:
        env: gymnasium environment (discrete actions).
        agent: DQN_Agent to train.
        replay_buffer: experience replay buffer.
        num_episodes: number of episodes to run.
        minimal_size: minimum buffer size before learning starts.
        batch_size: minibatch size for each agent.update call.
        plot_path: optional file path; when given, the learning curve is
            saved there every 10 episodes. (Previously this function read a
            module-level global ``plot_path``, which raised NameError when
            imported from another module.)

    Returns:
        List of (unscaled) episode returns.
    """
    return_list = []  # per-episode cumulative reward

    for _ in tqdm(range(num_episodes), desc="Training Episodes"):
        state, _ = env.reset()  # state: np.ndarray (state_dim,)

        episode_return = 0.0
        done = False

        while not done:
            action = agent.take_action(state)  # int

            next_state, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated

            # Store the raw transition (reward scaled for learning only);
            # dtype conversion is deferred to agent.update.
            replay_buffer.add(state, action, float(reward) * REWARD_SCALE, next_state, done)

            state = next_state
            episode_return += reward  # report the unscaled return

            # Learn once the buffer holds enough transitions.
            if len(replay_buffer) >= minimal_size:
                agent.update(replay_buffer.sample(batch_size))

        return_list.append(episode_return)
        agent.decay_epsilon()

        # Periodically checkpoint the learning curve without blocking
        # training (no plt.show() inside the loop).
        if plot_path is not None and len(return_list) % 10 == 0:
            _save_return_plot(return_list, plot_path)

    return return_list

def _save_return_plot(return_list, plot_path):
    """Render the episode-return curve and save it to plot_path."""
    plt.figure(figsize=(10, 5))
    plt.plot(return_list)
    plt.title("DQN Training Returns")
    plt.xlabel("Episode")
    plt.ylabel("Return")
    plt.grid()
    plt.savefig(plot_path)
    # Close the figure so repeated calls do not leak matplotlib figures.
    plt.close()

if __name__ == "__main__":
    # 定义结果保存目录：results/{GAME_NAME}/
    results_dir = os.path.join("results", GAME_NAME)
    os.makedirs(results_dir, exist_ok=True)

    # 模型和图像保存路径
    model_path = os.path.join(results_dir, "dqn_model.pth")
    plot_path = os.path.join(results_dir, "training_returns.png")

    # 创建环境
    env = make_env(GAME_NAME, render_video=False, video_freq=VIDEO_FREQ, max_steps=GAME_UPPER_STEP)
    state_dim = env.observation_space.shape[0]
    if not isinstance(env.action_space, gym.spaces.Discrete): # 显式检查动作空间类型
        raise NotImplementedError("DQN only supports discrete action spaces.")
    action_dim = env.action_space.n

    # 创建智能体
    agent = DQN_Agent(
        state_dim=state_dim,
        hidden_dim=HIDDEN_DIM,
        action_dim=action_dim,
        device=DEVICE,
        learning_rate=LEARNING_RATE,
        target_update=TARGET_UPDATE,
        tau = TAU,
        epsilon=EPSILON,
        final_epsilon=FINAL_EPSILON,
        epsilon_decay=EPSILON_DECAY,
        discount_factor=DISCOUNT_FACTOR,
        dqn_type = DQN_TYPE,
    )
    # 加载已有模型
    agent.load_model(model_path)

    # 经验回放
    replay_buffer = ReplayBuffer(BUFFER_SIZE)

    # 训练
    return_list = train_dqn(
        env=env,
        agent=agent,
        replay_buffer=replay_buffer,
        num_episodes=NUM_EPISODES,
        minimal_size=MINIMAL_SIZE,
        batch_size=BATCH_SIZE,
    )
    # 保存模型
    agent.save_model(model_path)

    # 关闭环境
    env.close()

    # 绘制训练曲线
    plt.figure(figsize=(10, 5))
    plt.plot(return_list)
    plt.title("DQN Training Returns")
    plt.xlabel("Episode")
    plt.ylabel("Return")
    plt.grid()
    plt.savefig(plot_path)
    plt.show()