'''
已适配，参考链接：https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/pqn_atari_envpool_lstm.py
本代码未完全参考链接，尤其是在最后的训练阶段采用的lstm状态为每一步的lstm状态
todo 不完全是pqn算法，适配矢量多环境，严格实现pqn的算法

训练记录，在cloudstudio上训练
20241218：训练分数227分，测试分数521.4分
20241219：学习率： 0.00025，训练分数345分，测试分数562分
20241220：学习率： 0.00025，训练分数398分，测试分数786分
20241221: 学习率：0.00025，训练分数513分，测试分数933分
20241223:学习率：0.00025，frame_idx:  20403200，训练分数542分，测试分数1000分，继续训练
20241224：学习率： 0.00025，训练分数671分，测试分数1282分，继续训练
20241225：学习率： 0.00025，  训练分数685，测试分数  1282分，继续训练
20241226：学习率： 0.00025，训练分数756分，测试分数1415分，继续训练
20241227：学习率： 0.00025，训练分数782分，测试分数1415分，继续训练
20241228：学习率： 0.00025，训练分数775分，测试分数1415分，继续训练，考虑调整学习率
20241230: 调整学习率调度器的频率，一天调整2次学习率，学习率： 0.00025，训练分数800分，测试分数1550.6分
20241231: 学习率： 0.00022500000000000002，训练分数842分，测试分数1550分，继续训练
20250101: 学习率： 0.00020250000000000002，训练分数864分，测试分数1550分，继续训练
20250102：学习率： 0.00013286025，训练分数815分，测试分数1550分，继续训练一天
20250103:学习率： 0.0001076168025，训练分数872分，测试分数1558分，继续训练
20250104:学习率： 9.685512225e-05，训练分数854分，测试分数1594/6293分（应该是偶发，差距太大，后续还是判断是否会有超过1594分），继续训练
20250105: 学习率： 8.7169610025e-05，训练分数873分，测试分数1626/6293分，继续训练
20250106: 学习率： 6.354664570822501e-05，训练分数888分，测试分数1814分/6293分，继续训练
20250107：学习率： 5.1472783023662265e-05，训练分数894分，测试分数1879分/6293分，继续训练
20250108:学习率： 4.169295424916644e-05，训练分数890分，测试分数2125分/6293分，继续训练
20250109:学习率： 3.377129294182482e-05，训练分数913分，测试分数3136/6293分，继续训练
20250110:学习率： 2.4619272554590296e-05，训练分数909分，测试分数3136/6293分，继续训练
20250111: 学习率： 1.7947449692296326e-05，训练分数923分，测试分数3136/6293分，继续训练
20250113：学习率： 1.3083690825684023e-05，训练分数949，测试分数3136/5025/6293分，继续训练
20250114: 学习率： 1.059778956880406e-05，训练分数956分，测试分数3136/5025/6293分，继续训练
20250115: 学习率： 9.538010611923653e-06,训练分数915，测试分数3136/5025/6293分，继续训练一天
20250116： 学习率： 8.584209550731288e-06，训练分数938分，测试分数3136/5025/6293分，继续训练
20250117: 学习率： 7.72578859565816e-06,训练分数939分，测试分数3136/5025/6293分，继续训练一天
20250119:学习率： 6.953209736092344e-06，训练分数925.8分，测试分数未产生新分数，无进步，暂停训练，play模型
'''
#!/usr/bin/env python3
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from tensorboardX import SummaryWriter
import random
import os

from lib import dqn_model, common
import ale_py

gym.register_envs(ale_py)

STATES_TO_EVALUATE = 1000
EVAL_EVERY_FRAME = 100


def calc_loss(batch, batch_lstm, net, tgt_net, gamma, device="cpu", double=True):
    '''
    Compute the (double) DQN MSE loss for a batch of transitions with
    per-step LSTM states.

    :param batch: list of experience transitions (unpacked by common.unpack_batch)
    :param batch_lstm: list of recorded LSTM states, one longer than `batch`;
        batch_lstm[i] is the state before step i, so [:-1] aligns with `states`
        and [1:] aligns with `next_states`
    :param net: online network, called as net(states, lstm_state) -> (q_values, lstm_state)
    :param tgt_net: target network with the same call signature
    :param gamma: discount factor
    :param device: torch device string
    :param double: if True use the double-DQN target (online net selects the
        next action, target net evaluates it); otherwise a plain max over the
        target net's Q-values
    :return: scalar MSE loss tensor
    '''
    states, actions, rewards, dones, next_states = common.unpack_batch(batch)

    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    done_mask = torch.BoolTensor(dones).to(device)
    lstm_v = batch_lstm[:-1]
    lstm_next_v = batch_lstm[1:]

    # Q(s, a) of the actions actually taken, from the online network
    state_action_values = net(states_v, lstm_v)[0].gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    # The TD target needs no gradient; computing it under no_grad avoids
    # building a useless autograd graph through the (target) network and
    # makes the former `.detach()` call unnecessary.
    with torch.no_grad():
        if double:
            # Double DQN: the online network picks the greedy next action...
            next_state_actions = net(next_states_v, lstm_next_v)[0].max(1)[1]
            # ...and the target network evaluates that action, which reduces
            # the overestimation bias of plain DQN.
            next_state_values = tgt_net(next_states_v, lstm_next_v)[0].gather(
                1, next_state_actions.unsqueeze(-1)).squeeze(-1)
        else:
            next_state_values = tgt_net(next_states_v, lstm_next_v)[0].max(1)[0]
        # Terminal states contribute no future value
        next_state_values[done_mask] = 0.0
        expected_state_action_values = next_state_values * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)


def calc_values_of_states(states, net, device="cpu"):
    """Average of the best (max over actions) Q-values across `states`.

    The states are split into 64 roughly equal chunks to bound memory use;
    the per-chunk means of the best action values are averaged at the end.
    """
    chunk_means = [
        net(torch.tensor(chunk).to(device)).max(1)[0].mean().item()
        for chunk in np.array_split(states, 64)
    ]
    return np.mean(chunk_means)


class RewardPenaltyWrapper(gym.Wrapper):
    # Reward-shaping wrapper: scales down outsized rewards and applies a
    # penalty when a life is lost (plus the symmetric bonus when a life is
    # regained).
    # NOTE(review): `frame_penalty` is stored but never applied anywhere in
    # this class — either dead configuration or a missing per-step penalty;
    # confirm the intent before relying on it.

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        """
        :param env: wrapped gymnasium environment
        :param frame_penalty: per-frame penalty (currently unused, see class note)
        :param life_loss_penalty: added to the reward when a life is lost;
            subtracted (i.e. granted as a bonus) when a life is regained
        """
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0  # life count observed on the previous step

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)  # initial life count
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # Scale down outsized rewards by one order of magnitude (floor
        # division, so e.g. 100 -> 10) to keep the reward scale manageable.
        if reward > 99:
            reward //= 10

        # Penalize losing a life; grant the symmetric bonus for regaining one
        # (life_loss_penalty is negative, hence += on loss and -= on gain).
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives
        elif current_lives > self.previous_lives:
            reward -= self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info


def wrap_dqn(env_id, stack_frames=4, episodic_life=True, seed=None, reward_clipping=True):
    """Return a thunk that builds a fully wrapped Atari environment.

    The thunk creates the env and applies, in order: episodic-life (optional),
    no-op reset, fire-on-reset (when the game needs it), 84x84 frame
    processing, channel reordering for PyTorch, frame stacking and the
    reward-penalty wrapper. A thunk is returned (rather than the env itself)
    so it can be handed to vectorized-env constructors.
    """

    def _thunk():
        env = gym.make(env_id)
        if episodic_life:
            # Treat each life as its own episode during training.
            env = ptan.common.wrappers.EpisodicLifeEnv(env)
        # Randomize the start state with up to 30 no-op actions.
        env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)
        if 'FIRE' in env.unwrapped.get_action_meanings():
            env = ptan.common.wrappers.FireResetEnv(env)
        # Observation pipeline + reward shaping, applied in order.
        pipeline = (
            ptan.common.wrappers.ProcessFrame84,
            ptan.common.wrappers.ImageToPyTorch,
            lambda e: ptan.common.wrappers.FrameStack(e, stack_frames),
            RewardPenaltyWrapper,
        )
        for wrap in pipeline:
            env = wrap(env)
        if seed is not None:
            env.action_space.seed(seed)
        return env

    return _thunk



class DQNLstmAgent(ptan.agent.BaseAgent):
    """DQN agent that carries a recurrent (LSTM) hidden state across steps.

    The (h, c) state is updated on every `__call__` and can be snapshotted
    (`clone_next_lstm_state`) or persisted to / restored from a checkpoint.
    """

    def __init__(self, dqn_model, num_envs, action_selector, device="cpu", preprocessor=ptan.agent.default_states_preprocessor):
        '''
        :param dqn_model: DQN network being trained, called as
            dqn_model(states, lstm_state) -> (q_values, lstm_state)
        :param num_envs: number of parallel environments (batch dim of the state)
        :param action_selector: maps Q-values to actions (e.g. epsilon-greedy)
        :param device: torch device string
        :param preprocessor: converts raw observations to model input
        '''

        self.dqn_model = dqn_model
        self.action_selector = action_selector
        self.preprocessor = preprocessor
        self.device = device
        self.num_envs = num_envs
        # Zero-initialized (h, c), each shaped (num_layers, num_envs, hidden_size)
        self.next_lstm_state = (
            torch.zeros(dqn_model.lstm.num_layers, self.num_envs, dqn_model.lstm.hidden_size).to(device),
            torch.zeros(dqn_model.lstm.num_layers, self.num_envs, dqn_model.lstm.hidden_size).to(device),
        )

    def initial_state(self):
        """Return a copy of the agent's current LSTM state.

        NOTE: despite the ptan BaseAgent contract's name, this does NOT return
        a fresh zero state — it snapshots whatever state the agent currently
        holds (identical to clone_next_lstm_state).
        """
        return self.clone_next_lstm_state()

    def clone_next_lstm_state(self):
        """Return a cloned copy of the current (h, c) LSTM state tuple."""
        return (
            self.next_lstm_state[0].clone(),
            self.next_lstm_state[1].clone(),
        )

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """Compute Q-values for `states`, advance the LSTM state and select actions."""
        # Apply the preprocessor (if any) to the raw observations
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)
        # Forward pass updates the recurrent state as a side effect
        q_v, self.next_lstm_state = self.dqn_model(states, self.next_lstm_state)
        q = q_v.cpu().numpy()  # no grad is attached here, `.data` would be redundant
        actions = self.action_selector(q)
        return actions, agent_states

    def save_state_dict(self, checkpoints):
        """Store the current LSTM state into the checkpoint dict in place."""
        checkpoints['next_lstm_state'] = self.next_lstm_state

    def load_state_dict(self, checkpoints):
        """Restore the LSTM state from a checkpoint dict."""
        self.next_lstm_state = checkpoints['next_lstm_state']


def test_model(env, net, device, num_envs=1, episodes=5):
    """Run greedy evaluation episodes and return the mean total reward.

    :param env: evaluation environment
    :param net: recurrent Q-network, called as net(obs, lstm_state) -> (q, lstm_state)
    :param device: torch device string
    :param num_envs: batch dimension for the LSTM state (single env here)
    :param episodes: number of evaluation episodes to average over
    :return: average episode reward (float)
    """
    with torch.no_grad():
        total_reward = 0.0
        for _ in range(episodes):
            # Fresh zero (h, c) state for every episode: the hidden state of a
            # finished episode must not leak into a freshly reset environment.
            next_lstm_state = (
                torch.zeros(net.lstm.num_layers, num_envs, net.lstm.hidden_size).to(device),
                torch.zeros(net.lstm.num_layers, num_envs, net.lstm.hidden_size).to(device),
            )
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
                q_vals, next_lstm_state = net(obs_v, next_lstm_state)
                # Greedy action: argmax over the Q-values
                action = q_vals.cpu().numpy().argmax()
                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                if done or trunc:
                    break
        return total_reward / episodes


if __name__ == "__main__":
    # Training hyper-parameters (PQN-style Q-learning with an LSTM on Atari Centipede)
    params = {
        'env_name': "ALE/Centipede-v5",
        'stop_reward': 5000.0,
        'run_name': 'pqn-lstm-double',
        'buffer_size': 128,
        'target_net_sync': 10000,
        'epsilon_frames': 10 ** 6,
        'epsilon_start': 1.0,
        'epsilon_final': 0.1,
        'learning_rate': 0.00025,
        'gamma': 0.99,
        'q_lambda': 0.65,
        'batch_size': 128 * 8,
        'mini_batch_size': 32,
        'num_envs': 1,
        'update_epochs': 4
    }
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=True, action="store_true", help="Enable cuda")
    parser.add_argument("--double", default=False, action="store_true", help="Enable double DQN")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda and torch.cuda.is_available() else "cpu")
    save_path = os.path.join("saves", params['run_name'])
    os.makedirs(save_path, exist_ok=True)

    # To strictly match the reference implementation, vectorized multi-env
    # training cannot be used here: the LSTM state must stay aligned with every
    # step, and with multiple envs some return early on `done`, which breaks
    # the alignment. TODO: extend the code to support vectorized training.
    # env = gym.vector.SyncVectorEnv([wrap_dqn(params['env_name'], seed=random.randint(0, 100) + i) for i in range(params['num_envs'])])
    env = wrap_dqn(params['env_name'])()
    test_env = wrap_dqn(params['env_name'])()

    # Create the online (training) network
    writer = SummaryWriter(comment="-" + params['run_name'] + "-double=" + str(args.double))
    net = dqn_model.DQN(env.observation_space.shape, env.action_space.n, num_envs=params['num_envs']).to(device)

    # Create the target network and the epsilon-greedy action selector,
    # then build the training agent from both
    tgt_net = ptan.agent.TargetNet(net)
    # Epsilon tracker: anneals the exploration rate as training progresses
    selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params['epsilon_start'])
    epsilon_tracker = common.EpsilonTracker(selector, params)
    # Depending on the current epsilon, the agent either acts greedily from
    # `net` or takes a random action via the selector
    agent = DQNLstmAgent(net, params['num_envs'], selector, device=device)

    # Experience source yielding 1-step transitions
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'], steps_count=1)
    buffer = []
    buffer_lstm = []  # per-step LSTM states; always one entry ahead of `buffer`
    # Optimizer and step-decay learning-rate scheduler
    optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50000, gamma=0.9)

    frame_idx = 0
    train_count = 0
    eval_states = None

    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Resume from the newest checkpoint; filenames embed the frame index
        # as the third underscore-separated token
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))

        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            net.load_state_dict(checkpoint['net'])
            tgt_net.target_model.load_state_dict(checkpoint['tgt_net'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']
            scheduler.load_state_dict(checkpoint['scheduler'])
            agent.load_state_dict(checkpoint)
            selector.epsilon = checkpoint['epsilon']
            print("加载模型成功")
            print("学习率：", optimizer.param_groups[0]['lr'])
            print("train_count: ", train_count)
            print("scheduler last epoch: ", scheduler.last_epoch)

    initial_lstm_state = agent.initial_state()
    buffer_lstm.append(agent.clone_next_lstm_state())
    with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        for step_idx, exp in enumerate(exp_source):
            frame_idx += 1
            # Anneal epsilon according to the current frame count
            epsilon_tracker.frame(frame_idx)
            # Record the transition and the LSTM state reached *after* it, so
            # buffer_lstm[i] / buffer_lstm[i+1] bracket buffer[i]
            buffer.append(exp)
            buffer_lstm.append(agent.clone_next_lstm_state())

            # pop_total_rewards() drains and clears the list of finished-episode
            # rewards collected by exp_source; it does not touch `buffer`
            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                # Stop training once the configured reward target is reached
                if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
                    break

            if len(buffer) < params['buffer_size']:
                continue


            optimizer.zero_grad()

            loss_v = calc_loss(buffer, buffer_lstm, net, tgt_net.target_model, gamma=params['gamma'], device=device,
                               double=args.double)
            loss_v.backward()
            optimizer.step()
            # NOTE(review): scheduler.step() is called twice per update. Per the
            # training log at the top of the file ("adjust the LR twice a day",
            # 20241230 entry) this appears deliberate — it halves the effective
            # StepLR period — confirm before "fixing" the apparent duplication.
            scheduler.step()
            scheduler.step()
            train_count += 1
            buffer.clear()
            buffer_lstm.clear()
            initial_lstm_state = agent.initial_state()
            buffer_lstm.append(agent.clone_next_lstm_state())

            if frame_idx % params['target_net_sync'] == 0:
                tgt_net.sync()
            # if frame_idx % EVAL_EVERY_FRAME == 0:
            #     mean_val = calc_values_of_states(eval_states, net, device=device)
            #     writer.add_scalar("values_mean", mean_val, frame_idx)
            if train_count % 100 == 0:
                # Periodically evaluate greedily on the test env and checkpoint
                net.eval()
                test_reward = test_model(test_env, net, device=device, episodes=5)
                net.train()
                print(f"Test reward: {test_reward:.2f}")
                common.save_best_model(test_reward, net.state_dict(), save_path, "dqn-double-lstm-best", keep_best=10)


                checkpoint = {
                    'net': net.state_dict(),
                    'train_count': train_count,
                    'optimizer': optimizer.state_dict(),
                    'frame_idx': frame_idx,
                    'scheduler': scheduler.state_dict(),
                    'tgt_net': tgt_net.target_model.state_dict(),
                    'epsilon': selector.epsilon,
                }
                agent.save_state_dict(checkpoints=checkpoint)
                common.save_checkpoints(frame_idx, checkpoint, save_path, "dqn-double-lstm", keep_last=5)

