#!/usr/bin/env python3
'''
Adapted version.

Training log:
Trained on CloudStudio.
2024-12-20: after train_frame_idx=386 the loss produced NaN values; training was
paused to investigate the hyperparameters and fix the problem.
'''

import collections
import os
import math
import random
from typing import Any
import torch.nn as nn
import ptan
import time
import gymnasium as gym
import ale_py
import argparse
from tensorboardX import SummaryWriter
from torch.distributions import Categorical

from lib import model, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn.utils as nn_utils
from collections import deque
import torch.multiprocessing as mp

gym.register_envs(ale_py)
GAMMA = 0.9
GAE_LAMBDA = 1.00 # 优势估计器的lambda因子，0.95是一个比较好的值

TRAJECTORY_SIZE = 129
LEARNING_RATE_ACTOR = 5e-4

PPO_EPS = 0.2
PPO_EPOCHES = 10 # todo 执行ppo的迭代次数 作用
PPO_BATCH_SIZE = 64 # 每次进行轨迹样本计算的batch长度

TEST_ITERS = 100000 # 采样迭代多少次，进行一次游戏测试

PROCESSES_COUNT = 4

CLIP_GRAD = 0.5

class ModelPPO(nn.Module):
    """Shared-trunk actor-critic network for PPO on stacked image frames.

    A convolutional feature extractor feeds one fully-connected layer,
    which branches into an actor head (raw action logits) and a critic
    head (scalar state-value estimate).
    """

    def __init__(self, obs_size, act_size):
        """
        :param obs_size: observation shape, (channels, height, width)
        :param act_size: number of discrete actions
        """
        super(ModelPPO, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(obs_size[0], 64, kernel_size=8, stride=4),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )

        conv_out_size = self._get_conv_out(obs_size)
        self.linear = nn.Sequential(
            nn.Linear(conv_out_size, 512),
            nn.ReLU(),
            nn.Dropout(p=0.2))
        self.action_linear = nn.Linear(512, act_size)  # actor head: action logits
        self.critic_linear = nn.Linear(512, 1)         # critic head: state value

    def _get_conv_out(self, shape):
        """Return the flattened size of the conv trunk's output for input *shape*."""
        # Run the dummy forward pass under no_grad so no autograd graph is
        # built just to discover the output size.  (BatchNorm running stats
        # are still touched by this pass, which is harmless before training.)
        with torch.no_grad():
            return self.conv(torch.zeros(1, *shape)).nelement()

    def forward(self, x):
        """Return (action_logits, state_value) for a batch of raw frames."""
        x = x.float() / 255.0  # scale raw pixel bytes into [0, 1]
        conv_out = self.conv(x).view(x.size(0), -1)
        x = self.linear(conv_out)
        return self.action_linear(x), self.critic_linear(x)


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: scales raw rewards and penalizes life loss.

    NOTE(review): ``frame_penalty`` is stored but never applied in ``step()``
    — confirm whether a per-frame penalty was intended.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty  # per-frame penalty (currently unused)
        self.life_loss_penalty = life_loss_penalty  # added when a life is lost (negative value)
        self.previous_lives = 0  # life count at the previous step; refreshed on reset()

    def reset(self, **kwargs):
        """Reset the env and record the starting life count from ``info``."""
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)  # initial life count
        return obs, info

    def step(self, action):
        """Step the env, scaling the reward and applying life-change shaping."""
        obs, reward, done, truncated, info = self.env.step(action)

        # Scale non-zero rewards down by 100.
        # NOTE(review): ``//=`` is floor division, so e.g. a reward of 50
        # becomes 0 and -50 becomes -1 — confirm true division (/=) was not
        # the intent.
        if reward != 0:
            reward //= 100
        
        # Penalize losing a life; symmetrically reward gaining one.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives
        elif current_lives > self.previous_lives:
            reward -= self.life_loss_penalty
            self.previous_lives = current_lives
        
        
        return obs, reward, done, truncated, info



def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the standard Atari preprocessing stack plus reward shaping.

    NOTE(review): ``stack_frames``, ``episodic_life`` and ``reward_clipping``
    are currently unused — the EpisodicLifeEnv wrapper below is commented out.
    """
    # if episodic_life:
        # simulate a multi-life game as a single-life one
        # env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomize the initial state with up to 30 no-op actions.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = ptan.common.wrappers.FireResetEnv(env)
    env = ptan.common.wrappers.ProcessFrame84(env)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = RewardPenaltyWrapper(env)
    return env


def test_net(net, env, count=10, device="cpu"):
    """Play *count* greedy episodes and return (mean reward, mean steps).

    An episode is aborted once the greedy policy emits more than 30
    consecutive repeated Noop (action 0) steps, so a stalled policy
    cannot hang the evaluation.
    """
    total_reward = 0.0
    total_steps = 0
    with torch.no_grad():
        for _ in range(count):
            repeated_noops = 0
            last_action = -1
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
                logits_v, _ = net(obs_v)

                # Greedy action: argmax over the actor head's logits.
                action = logits_v.squeeze(dim=0).data.cpu().argmax().item()

                if action == 0 and last_action == action:  # repeated Noop
                    repeated_noops += 1
                    if repeated_noops > 30:
                        break
                else:
                    repeated_noops = 0
                last_action = action
                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                total_steps += 1
                if done or trunc:
                    break
    return total_reward / count, total_steps / count


def calc_adv_ref(trajectory, net_ppo, states_v, device="cpu"):
    """Compute GAE advantages and bootstrapped value references.

    Walks the trajectory backwards so each step's advantage accumulates
    the (GAMMA * GAE_LAMBDA)-discounted advantage of the steps after it.

    :param trajectory: list of 1-step experience tuples
    :param net_ppo: actor-critic network whose critic head predicts V(s)
    :param states_v: tensor of the trajectory's states
    :return: (advantages tensor, reference-values tensor) on *device*
    """
    with torch.no_grad():
        _, values_v = net_ppo(states_v)  # critic's value predictions
    values = values_v.squeeze().data.cpu().numpy()

    gae = 0.0          # running smoothed advantage (generalized advantage estimator)
    advantages = []    # per-step advantage estimates
    references = []    # per-step value targets (advantage + predicted value)
    # Pair each value with its successor and the matching experience,
    # iterating from the end of the trajectory toward the beginning.
    for val, next_val, (exp,) in zip(reversed(values[:-1]),
                                     reversed(values[1:]),
                                     reversed(trajectory[:-1])):
        if exp.done:
            # Terminal step: no bootstrap, and the accumulated advantage resets.
            gae = exp.reward - val
        else:
            # 1-step TD error from the Bellman target ...
            td_error = exp.reward + GAMMA * next_val - val
            # ... folded into the discounted running advantage.
            gae = td_error + GAMMA * GAE_LAMBDA * gae
        advantages.append(gae)
        references.append(gae + val)

    # The lists were built back-to-front; restore chronological order.
    advantages.reverse()
    references.reverse()
    adv_v = torch.FloatTensor(advantages).to(device)
    ref_v = torch.FloatTensor(references).to(device)
    return adv_v, ref_v

def ppo_states_preprocessor(states):
    """Convert a list of per-environment states into one batched tensor.

    A single state of shape D becomes a [1, *D] tensor; a list of N states
    becomes an [N, *D] tensor.

    :param states: list of numpy arrays with states
    :return: torch.Tensor owning its own memory
    """
    if len(states) == 1:
        np_states = np.expand_dims(states[0], 0)
    else:
        # np.asarray replaces np.array(..., copy=False): under NumPy >= 2.0
        # copy=False *raises* ValueError whenever a copy is actually needed,
        # while asarray keeps the old copy-only-if-necessary behavior.
        np_states = np.asarray([np.asarray(s) for s in states])
    # .copy() guarantees the tensor is backed by fresh memory even when
    # np_states is a view of a caller-owned array (the expand_dims branch).
    return torch.tensor(np_states.copy())


# 创建一个可以给成员命名的元组，可以类似于类一样使用
TotalReward = collections.namedtuple('TotalReward', field_names='reward')


def data_func(net_ppo, device, train_queue, seed=0):
    """Worker-process entry point: generate experience and feed the queue.

    Each worker builds its own wrapped environment, runs the shared policy
    network on it, and pushes every 1-step experience into *train_queue*.
    Whenever episodes finish, a TotalReward message carrying their
    (reward, steps) pairs is pushed first.

    :param net_ppo: shared (share_memory) actor-critic network
    :param device: torch device the policy runs on
    :param train_queue: mp.Queue consumed by the training process
    :param seed: per-worker RNG seed (truncated to int)
    """
    seed = int(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)

    env = wrap_dqn(gym.make("ALE/Blackjack-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    agent = ptan.agent.PolicyAgent(lambda x: net_ppo(x)[0], device=device, preprocessor=ppo_states_preprocessor, apply_softmax=True)
    exp_source = ptan.experience.ExperienceSource(env, agent, steps_count=1)

    for exp in exp_source:
        # pop_rewards_steps() returns one (total_reward, total_steps) pair per
        # episode finished since the last call.  The consumer unpacks the
        # message with zip(*msg.reward), so it must receive the pair list —
        # the previous np.mean(pop_total_rewards()) scalar made that unpack
        # raise TypeError.
        rewards_steps = exp_source.pop_rewards_steps()
        if rewards_steps:
            train_queue.put(TotalReward(reward=rewards_steps))
        train_queue.put(exp)


def select_device(args):
    """Choose the torch device from the parsed CLI arguments.

    With --cuda set, prefer CUDA, then Apple MPS; otherwise fall back to CPU.
    """
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")



if __name__ == "__main__":
    # 'spawn' is required so CUDA tensors and the shared network survive
    # being handed to child processes.
    mp.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    device = select_device(args)

    save_path = os.path.join("saves", "dppo-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    test_env = wrap_dqn(gym.make("ALE/Blackjack-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    # Create the shared actor-critic network.
    net_ppo = ModelPPO(test_env.observation_space.shape, test_env.action_space.n).to(device)
    # share_memory() lets the worker processes run the same weights the
    # trainer updates.
    net_ppo.share_memory()
    print(net_ppo)

    writer = SummaryWriter(comment="-dppo-" + args.name)

    opt_ppo = optim.Adam(net_ppo.parameters(), lr=LEARNING_RATE_ACTOR)
    scheduler = optim.lr_scheduler.StepLR(opt_ppo, step_size=2000, gamma=0.9)

    train_queue = mp.Queue(maxsize=TRAJECTORY_SIZE)
    data_proc_list = []
    # Launch the experience-gathering workers.
    # NOTE(review): time.time() as the seed may give workers started within
    # the same second identical (int-truncated) seeds — confirm distinct
    # seeds were intended.
    for _ in range(PROCESSES_COUNT):
        data_proc = mp.Process(target=data_func, args=(net_ppo, device, train_queue, time.time()))
        data_proc_list.append(data_proc)
        data_proc.start()

    start_idx = 0          # consumed-queue-item counter (also the logging step)
    old_ratio_v_mean = 0   # last mean PPO ratio, persisted in checkpoints
    grad_index = 0         # per-minibatch logging step
    train_frame_idx = 0    # number of completed trajectory optimizations
    # Resume from the newest matching checkpoint, if any exist.
    # NOTE(review): if the directory is non-empty but contains no file with
    # "epoch" in its name, checkpoints[-1] raises IndexError — confirm the
    # filter matches common.save_checkpoints' naming.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        opt_ppo.load_state_dict(checkpoint['opt_ppo'])
        net_ppo.load_state_dict(checkpoint['net_ppo'])
        start_idx = checkpoint['start_idx']
        old_ratio_v_mean = checkpoint['old_ratio_v_mean']
        grad_index = checkpoint['grad_index']
        train_frame_idx = checkpoint['train_frame_idx']
        scheduler.load_state_dict(checkpoint['scheduler'])
        print("加载模型成功")
        # Report the restored training state.
        print("Learning Rate:", opt_ppo.param_groups[0]['lr'])
        print("train_frame_idx:", train_frame_idx)
        print("scheduler epoch:", scheduler.last_epoch)
        # Slow down LR decay after resuming by widening the scheduler's step size.
        scheduler.step_size = 10000


    trajectory = [] # note: the experience buffer is called a "trajectory" here
    best_reward = None
    with ptan.common.utils.RewardTracker(writer) as tracker:
        while True:
            start_idx += 1
            train_traj_data = train_queue.get()
            if isinstance(train_traj_data, TotalReward):
                # An episode finished in some worker; log it and keep consuming.
                print("检测到游戏结束，奖励为：", train_traj_data.reward)
                # NOTE(review): this unpack requires .reward to be an iterable
                # of (reward, steps) pairs — verify the worker puts pairs, not
                # a scalar mean, or this zip raises TypeError.
                rewards, steps = zip(*train_traj_data.reward)
                writer.add_scalar("episode_steps", np.mean(steps), start_idx)
                tracker.reward(np.mean(rewards), start_idx)
                continue
                

            # Periodically run a greedy evaluation and save the best models.
            if start_idx > 0 and start_idx % TEST_ITERS == 0:
                ts = time.time()
                net_ppo.eval()
                rewards, steps = test_net(net_ppo, test_env, count=10, device=device)
                net_ppo.train()
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, start_idx)
                writer.add_scalar("test_steps", steps, start_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                    best_reward = rewards
                common.save_best_model(rewards, net_ppo.state_dict(), save_path, f"ppo-best-{train_frame_idx}", keep_best=10)


            # Accumulate experiences until a full trajectory is collected.
            trajectory.append(train_traj_data)
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            # Unpack states/actions from the 1-step experience tuples.
            traj_states = [t[0].state for t in trajectory]
            traj_actions = [t[0].action for t in trajectory]
            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            # Compute GAE advantages and bootstrapped value targets.
            traj_adv_v, traj_ref_v = calc_adv_ref(trajectory, net_ppo, traj_states_v, device=device)
            # Action probabilities under the current ("old") policy, frozen so
            # the PPO ratio can be computed against them during the epochs below.
            with torch.no_grad():
                old_mu_v = F.softmax(net_ppo(traj_states_v)[0], dim=1)
            old_logprob_v = torch.log(old_mu_v.gather(1, torch.tensor(traj_actions, dtype=torch.int64).to(device).unsqueeze(-1))).detach()

            # Normalize advantages for training stability.
            # NOTE(review): torch.std is 0 when all advantages are equal,
            # which yields NaN here — a small epsilon in the denominator would
            # guard against that (possibly related to the NaN incident in the
            # module header).
            traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / torch.std(traj_adv_v)

            # Drop the last entry: calc_adv_ref produced adv/ref values only
            # for trajectory[:-1], so the tensors must stay aligned with it.
            trajectory = trajectory[:-1]
            old_logprob_v = old_logprob_v[:-1].detach()

            sum_loss_value = 0.0
            sum_loss_policy = 0.0
            # Presumably starts at 1 to avoid division by zero when averaging;
            # this slightly biases the reported mean losses.
            count_steps = 1
            is_interrupt = False

            # PPO optimization: several epochs of minibatch updates over the
            # frozen trajectory (proximal policy optimization).
            for epoch in range(PPO_EPOCHES):
                for batch_ofs in range(0, len(trajectory), PPO_BATCH_SIZE):
                    states_v = traj_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    actions_v = traj_actions_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_adv_v = traj_adv_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE].unsqueeze(-1)
                    batch_ref_v = traj_ref_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_old_logprob_v = old_logprob_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]

                    opt_ppo.zero_grad()
                    # Forward pass: action logits and value predictions for the batch.
                    logits, value = net_ppo(states_v)
                    # After this line `logits` actually holds softmax probabilities.
                    logits = F.softmax(logits, dim=1)
                    indices = actions_v.long().to(device).unsqueeze(-1)
                    
                    # Probability of the actions actually taken.
                    # ---------------- debug logging ---------------- #
                    
                    # gathered_values = logits.gather(1, indices)
                    # min_value = gathered_values.min().item()
                    # max_value = gathered_values.max().item()
                    # zero_count = (gathered_values == 0).sum().item()
                    # near_zero_count = (gathered_values.abs() < 1e-7).sum().item()

                    # writer.add_scalar("gathered_min", min_value, grad_index)
                    # writer.add_scalar("gathered_max", max_value, grad_index)
                    # writer.add_scalar("gathered_zero_count", zero_count, grad_index)
                    # writer.add_scalar("gathered_near_zero_count", near_zero_count, grad_index)
                    # ---------------- -------- ----------------#

                    # 1e-7 keeps log() finite when a probability underflows to 0.
                    logprob_pi_v = torch.log(logits.gather(1, indices) + 1e-7)
                    writer.add_scalar("logprob_pi_v mean", logprob_pi_v.mean().item(), grad_index)
                    writer.add_scalar("logprob_pi_v max", logprob_pi_v.max().item(), grad_index)
                    writer.add_scalar("logprob_pi_v min", logprob_pi_v.min().item(), grad_index)
                    writer.add_scalar("batch_old_logprob_v mean", batch_old_logprob_v.mean().item(), grad_index)
                    writer.add_scalar("batch_old_logprob_v max", batch_old_logprob_v.max().item(), grad_index)
                    writer.add_scalar("batch_old_logprob_v min", batch_old_logprob_v.min().item(), grad_index)
                    writer.add_scalar("ratio_v_pre mean", (logprob_pi_v - batch_old_logprob_v).mean().item(), grad_index)
                    writer.add_scalar("ratio_v_pre max", (logprob_pi_v - batch_old_logprob_v).max().item(), grad_index)
                    writer.add_scalar("ratio_v_pre min", (logprob_pi_v - batch_old_logprob_v).min().item(), grad_index)

                    # PPO probability ratio pi_new(a|s) / pi_old(a|s).
                    ratio_v = torch.exp(logprob_pi_v - batch_old_logprob_v)
                    # if old_ratio_v_mean != 0 and abs(ratio_v.mean().item() - old_ratio_v_mean) > 100:
                    #     opt_ppo.zero_grad()
                    #     is_interrupt = True
                    #     break
                    old_ratio_v_mean = ratio_v.mean().item()
                    writer.add_scalar("ratio_v mean", ratio_v.mean().item(), grad_index)
                    writer.add_scalar("ratio_v max", ratio_v.max().item(), grad_index)
                    writer.add_scalar("ratio_v min", ratio_v.min().item(), grad_index)
                    writer.add_scalar("batch_adv_v mean", batch_adv_v.mean().item(), grad_index)
                    writer.add_scalar("batch_adv_v min", batch_adv_v.min().item(), grad_index)
                    writer.add_scalar("batch_adv_v max", batch_adv_v.max().item(), grad_index)
                    # Unclipped surrogate objective.
                    surr_obj_v = batch_adv_v * ratio_v
                    writer.add_scalar("surr_obj_v mean", surr_obj_v.mean().item(), grad_index)
                    writer.add_scalar("surr_obj_v min", surr_obj_v.min().item(), grad_index)
                    writer.add_scalar("surr_obj_v max", surr_obj_v.max().item(), grad_index)

                    # Clipped surrogate: ratio limited to [1-eps, 1+eps].
                    clipped_surr_v = batch_adv_v * torch.clamp(ratio_v, 1.0 - PPO_EPS, 1.0 + PPO_EPS)
                    writer.add_scalar("clipped_surr_v mean", clipped_surr_v.mean().item(), grad_index)
                    writer.add_scalar("clipped_surr_v min", clipped_surr_v.min().item(), grad_index)
                    writer.add_scalar("clipped_surr_v max", clipped_surr_v.max().item(), grad_index)

                    writer.add_scalar("torch.clamp mean", torch.clamp(ratio_v, 1.0 - PPO_EPS, 1.0 + PPO_EPS).mean().item(), grad_index)
                    writer.add_scalar("torch.clamp min", torch.clamp(ratio_v, 1.0 - PPO_EPS, 1.0 + PPO_EPS).min().item(), grad_index)
                    writer.add_scalar("torch.clamp max", torch.clamp(ratio_v, 1.0 - PPO_EPS, 1.0 + PPO_EPS).max().item(), grad_index)

                    # Entropy bonus to discourage premature policy collapse.
                    # NOTE(review): `logits` holds softmax probabilities at this
                    # point, so Categorical(logits=...) applies a second softmax
                    # and the entropy is computed on distorted probabilities —
                    # Categorical(probs=logits) may have been intended.
                    entropy = torch.mean(Categorical(logits=logits).entropy())
                    entropy_coef = 0.01

                    # Policy loss: negative clipped-surrogate minimum, minus entropy bonus.
                    loss_policy_v = -(torch.min(surr_obj_v, clipped_surr_v).mean()) - entropy_coef * entropy
                    # Critic loss; smooth_l1_loss is symmetric, so the swapped
                    # (target, input) argument order does not change the value.
                    loss_critic_v = F.smooth_l1_loss(batch_ref_v, value.squeeze())
                    # entropy_loss = torch.mean(Categorical(logits).entropy())
                    total_loss = loss_policy_v + loss_critic_v
                    # total_loss = loss_policy_v + loss_critic_v - 0.01 * entropy_loss
                    total_loss.backward()
                    torch.nn.utils.clip_grad_norm_(net_ppo.parameters(), CLIP_GRAD)
                    opt_ppo.step()

                    # --------------- gradient logging -----------------#
                    # NOTE(review): p.grad would be None for any parameter not
                    # touched by the backward pass; this assumes all parameters
                    # receive gradients.
                    grad_max = 0.0
                    grad_means = 0.0
                    grad_count = 0
                    for p in net_ppo.parameters():
                        grad_max = max(grad_max, p.grad.abs().max().item())
                        grad_means += (p.grad ** 2).mean().sqrt().item()
                        grad_count += 1
                    writer.add_scalar("grad_l2", grad_means / grad_count, grad_index)
                    writer.add_scalar("grad_max", grad_max, grad_index)

                    weights_max = 0.0
                    weights_means = 0.0
                    weights_count = 0
                    for p in net_ppo.parameters():
                        weights_max = max(weights_max, p.data.abs().max().item())
                        weights_means += (p.data ** 2).mean().sqrt().item()
                        weights_count += 1
                    writer.add_scalar("weights_l2", weights_means / weights_count, grad_index)
                    writer.add_scalar("weights_max", weights_max, grad_index)
                    # ------------------------------------------ # 

                    # Accumulate totals for the per-trajectory mean losses.
                    sum_loss_value += loss_critic_v.item()
                    sum_loss_policy += loss_policy_v.item()
                    count_steps += 1
                    grad_index += 1
                if is_interrupt:
                    is_interrupt = False
                    break

            # Trajectory consumed; reset and log the aggregate statistics.
            trajectory.clear()
            train_frame_idx += 1
            print("step train_frame_idx:", train_frame_idx)
            scheduler.step()
            writer.add_scalar("advantage", traj_adv_v.mean().item(), start_idx)
            writer.add_scalar("values", traj_ref_v.mean().item(), start_idx)
            writer.add_scalar("loss_policy", sum_loss_policy / count_steps, start_idx)
            writer.add_scalar("loss_value", sum_loss_value / count_steps, start_idx)

            # Persist everything needed for a seamless resume.
            checkpoints = {
                'net_ppo': net_ppo.state_dict(),
                'opt_ppo': opt_ppo.state_dict(),
                'start_idx': start_idx,
                'old_ratio_v_mean': old_ratio_v_mean,
                'grad_index':grad_index,
                'train_frame_idx': train_frame_idx,
                'scheduler': scheduler.state_dict()
            }
            common.save_checkpoints(train_frame_idx, checkpoints, save_path, "ppo", keep_last=3)



