#!/usr/bin/env python3
'''
Adapted but not yet verified. Reference:
https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/pqn.py#L89
'''
import os
import gymnasium as gym
import ptan
import argparse

import logging
from logging.handlers import RotatingFileHandler
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np

from tensorboardX import SummaryWriter
from typing import Any
from lib import dqn_model, common
import ale_py
import random

gym.register_envs(ale_py)


class FireResetEnv(gym.Wrapper):
    """Press FIRE after reset for games that need it to start.

    Some Atari games sit on a splash screen until FIRE is pressed; such
    games expose a FIRE action and have at least three actions in total
    (asserted in ``__init__``).
    """

    def __init__(self, env=None):
        """For environments where the user needs to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Sanity-check the action table: action 1 must be FIRE and the
        # game must have three or more actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Pass-through; only reset() is customized by this wrapper.
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset, then press actions 1 and 2 to kick the game off.

        We do not know which probe actually starts the game, so both are
        tried; if a probe terminates the episode we reset again.

        Bug fix: the original returned the observation/info from the
        terminated step when the second probe ended the episode; now the
        observation from the follow-up reset is returned instead.
        """
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info

class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: scales the raw reward, adds a small
    per-frame penalty, and a large penalty whenever a life is lost
    (read from the ``lives`` entry of the step ``info`` dict).
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty          # added to every step's reward
        self.life_loss_penalty = life_loss_penalty  # added when `lives` decreases
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so step() can detect a loss.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        reward /= 100  # scale down the raw score

        # Bug fix: frame_penalty was stored in __init__ but never applied;
        # add it so every frame carries the small survival cost the
        # constructor advertises.
        reward += self.frame_penalty

        # Penalize losing a life; falls back to previous_lives when the
        # env does not report `lives` (then no penalty is ever applied).
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info
    


def wrap_dqn(env, seed):
    """Return a zero-argument factory over `env`, seeding its action space.

    SyncVectorEnv expects a list of such env-creating thunks.
    """
    def _make():
        env.action_space.seed(seed=seed)
        return env
    return _make


def test_model(env, net, device, episodes=5):
    """Greedily roll out `net` in `env` and return the mean total reward.

    Args:
        env: single (non-vectorized) gymnasium environment.
        net: Q-network; called on a preprocessed batch of one observation.
        device: torch device the network lives on.
        episodes: number of evaluation episodes to average over.

    An episode is also cut short after more than 30 consecutive repeated
    Noop (action 0) choices, so a stuck deterministic policy cannot loop
    forever.
    """
    total_reward = 0.0
    for _ in range(episodes):
        noop_action_count = 0
        pre_action = -1
        obs, _ = env.reset()
        while True:
            obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
            # Bug fix: run inference under no_grad so evaluation does not
            # build an autograd graph (saves memory over long rollouts).
            with torch.no_grad():
                logits_v = net(obs_v)
                probs_v = F.softmax(logits_v, dim=1)
            probs = probs_v.data.cpu().numpy()
            action = np.argmax(probs)
            if action == 0 and pre_action == action:  # repeated Noop
                noop_action_count += 1
                if noop_action_count > 30:
                    break
            else:
                noop_action_count = 0
            pre_action = action
            obs, reward, done, trunc, _ = env.step(action)
            total_reward += reward
            if done or trunc:
                break
    return total_reward / episodes


def setup_logger(save_path):
    """Create (or return) an INFO logger writing to ``<save_path>/train.log``.

    The file rotates at 1 MiB keeping 2 backups.

    Bug fix: ``logging.getLogger(__name__)`` returns a process-wide
    singleton, so the original attached a new handler on every call and
    each log line was duplicated; now an equivalent existing handler is
    detected and reused.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    log_file = os.path.join(save_path, 'train.log')
    # Only attach a handler if one for this file is not already present.
    has_handler = any(
        isinstance(h, RotatingFileHandler)
        and getattr(h, 'baseFilename', None) == os.path.abspath(log_file)
        for h in logger.handlers
    )
    if not has_handler:
        handler = RotatingFileHandler(log_file, maxBytes=1024 * 1024, backupCount=2)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    return logger


if __name__ == "__main__":
    # Training hyper-parameters.
    # NOTE(review): several of these (gamma, epsilon schedule, stop_reward,
    # learning_rate) are re-hard-coded as literals further down instead of
    # being read from this dict — keep them in sync.
    params = {
        'env_name':         "CartPole-v1",
        'stop_reward':      5000.0,
        'run_name':         'dqn-basic-pqn',
        'replay_size':      10 ** 6,
        'replay_initial':   50000,
        'target_net_sync':  10000,
        'epsilon_frames':   10 ** 6,
        'epsilon_start':    1.0,
        'epsilon_final':    0.1,
        'learning_rate':    0.00025,
        'gamma':            0.99,
        'q_lambda':         0.65,
        'batch_size':       128 * 8,
        'mini_batch_size':   32,
        'num_envs':         8,
        'update_epochs':    4
    }

    save_path = os.path.join("saves", params['run_name'])
    os.makedirs(save_path, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Vectorized training envs (each wrap_dqn thunk seeds the action space)
    # plus one separate single env used only for evaluation.
    env = gym.vector.SyncVectorEnv([wrap_dqn(gym.make(params['env_name']), seed=random.randint(0, 100) + i) for i in range(params['num_envs'])])
    test_env = wrap_dqn(gym.make(params['env_name']), seed=random.randint(0, 100))()


    # TensorBoard writer (run directory name derived from the comment suffix).
    writer = SummaryWriter(comment="cartpole-basic")
    # Q-network.
    net = dqn_model.PQNCartpole(np.array(env.single_observation_space.shape).prod(), env.single_action_space.n).to(device)
    # NOTE(review): this RAdam optimizer is dead code — it is overwritten by
    # the Adam optimizer created below, so params['learning_rate'] is unused.
    optimizer = optim.RAdam(net.parameters(), lr=params['learning_rate'])
    # Epsilon-greedy action selector.
    selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=1.0)
    # Epsilon decays over training: early on actions are mostly random
    # (exploration — the network is untrained); as training progresses the
    # network's greedy choices increasingly take over (exploitation).
    epsilon_tracker = common.EpsilonTracker(selector, 1.0, 0.1, 10**6)
    # Agent wrapping the network for action selection during rollout.
    agent = ptan.agent.DQNAgent(net, selector, device=device)

    # Experience source yielding first/last transition pairs from the
    # vectorized envs.
    # The referenced cleanrl implementation collects data shaped
    # (num_steps, num_envs, env_action_shape) and flattens it with
    # view(-1, env_action_shape) before training, which matches the flat
    # stream ExperienceSourceFirstLast produces here.
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=0.99, steps_count=1, vectorized=True)
    # Plain list used as the on-policy rollout buffer; it is drained and
    # cleared after every update (no replay).
    buffer = []

    # Optimizer actually used for training (replaces the RAdam above).
    # NOTE(review): scheduler.step() is never called anywhere below, so the
    # StepLR schedule never actually decays the learning rate.
    optimizer = optim.Adam(net.parameters(), lr=0.0001)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=600000, gamma=0.9)

    # Total frames seen and number of completed training updates.
    frame_idx = 0
    train_count = 0
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Resume from the newest checkpoint; files are sorted numerically by
        # the frame count embedded in the filename
        # (pattern "<name>_epoch_<frames>.<ext>" assumed — TODO confirm
        # against common.save_checkpoints).
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))

        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            net.load_state_dict(checkpoint['net'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']
            scheduler.load_state_dict(checkpoint['scheduler'])
            print("加载模型成功")
            print("学习率：", optimizer.param_groups[0]['lr'])
            print("frame_idx: ", frame_idx)
            print("scheduler last epoch: ", scheduler.last_epoch)

    logger = setup_logger(save_path)
    # Reward tracker: stops training when the tracked reward hits stop_reward.
    # NOTE(review): 10000.0 here disagrees with params['stop_reward'] (5000.0).
    with common.RewardTracker(writer, stop_reward=10000.0, logger=logger) as reward_tracker:
        for step_idx, exp in enumerate(exp_source):
            frame_idx += 1
            # Decay epsilon according to the current frame count.
            epsilon_tracker.frame(frame_idx)
            buffer.append(exp)

            # pop_total_rewards() drains finished-episode rewards from
            # exp_source only; the transitions stored in `buffer` are
            # untouched.
            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                # Report the most recent episode reward; the tracker returns
                # True once the mean reward reaches the target, which ends
                # training.
                if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
                    break

            # Wait until a full batch of transitions has been collected.
            if len(buffer) < params['batch_size']:
                continue

            # NOTE(review): gradients are zeroed only once per batch here,
            # yet backward()/step() run for every mini-batch below, so
            # gradients accumulate across all mini-batch updates — likely
            # unintended; zero_grad() probably belongs inside the inner loop.
            optimizer.zero_grad()
            # Q(lambda) returns for the whole batch, computed once with the
            # current network (presumably detached inside the helper —
            # TODO confirm, otherwise the repeated backward() calls below
            # would fail on a freed graph).
            q_v = common.calc_q_lambda_returns(buffer, net, gamma=params['gamma'], q_lambda=params['q_lambda'], device=device)
            b_idx = np.arange(params['batch_size'])
            for epoch in range(params['update_epochs']):
                np.random.shuffle(b_idx)
                for start in range(0, params['batch_size'], params['mini_batch_size']):
                    end = start + params['mini_batch_size']
                    batch = [buffer[i] for i in b_idx[start:end]]
                    states, actions, _, _, _ = common.unpack_batch(batch)
                    states_v = torch.tensor(states, device=device)
                    actions_v = torch.tensor(actions, device=device)
                    # Q-values the network predicts for the actions taken.
                    old_q_vals = net(states_v).gather(1, actions_v.unsqueeze(-1).long()).squeeze(-1)
                    # Regression loss against the precomputed returns.
                    loss_v = F.mse_loss(q_v[b_idx[start:end]], old_q_vals)
                    # Backpropagate.
                    loss_v.backward()
                    # Apply the update.
                    optimizer.step()


            # On-policy training: discard the batch after the updates.
            buffer.clear()
            # NOTE(review): train_count is never incremented, so this
            # condition is always True and evaluation + checkpointing run
            # after every single batch update.
            if train_count % 100 == 0:
                # Evaluate the current policy greedily on the test env.
                net.eval()
                test_reward = test_model(test_env, net, device=device, episodes=5)
                net.train()
                print(f"Test reward: {test_reward:.2f}")
                common.save_best_model(test_reward, net.state_dict(), save_path, "dqn-basic-best", keep_best=10)


                checkpoint = {
                    'net': net.state_dict(),
                    'train_count': train_count,
                    'optimizer': optimizer.state_dict(),
                    'frame_idx': frame_idx,
                    'scheduler': scheduler.state_dict()
                }
                common.save_checkpoints(frame_idx, checkpoint, save_path, "dqn-basic", keep_last=5)