#!/usr/bin/env python3
'''
未验证
参考链接：https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/sac_atari.py#L163

训练记录：
在cloudstudio上训练
1128：训练分数-58分，测试分数-58分，毫无提升，代码是否有问题？接下来尝试使用上面的示例代码的游戏
20250125:调整代码，修复代码错误，重新进行训练
在2号机上重新训练
训练分数-4.30，测试分数-19.8分，继续训练，虽然慢但是训练分数缓慢上升
20250126:
train_count: 33617
act学习率： 0.0003
crt学习率： 0.0003
训练分数-4.240，测试分数10.2分，继续训练
分数确实有上升，但是训练的速度较慢，以10万轮为准，有明显的正向说明模型有效果
20250127:
train_count: 68448
act学习率： 0.0003
crt学习率： 0.0003
训练分数-3.790，测试分数10.2分，确实在上升，不过如果一旦出现了不再上升增加学习率调度器后在尝试
20250128: train_count: 105486,act学习率： 0.0003,crt学习率： 0.0003，训练分数-3.480，测试分数10.2分，加入学习率调度器
20250129:train_count: 141498，act学习率： 0.0003，crt学习率： 0.0003，训练分数-3.120，测试分数15.3分，继续训练
20250130:train_count: 159478,act学习率： 0.0003,crt学习率： 0.0003，训练分数-2.860，测试分数15.3分，继续训练
20250131: train_count: 189332,act学习率： 0.0003,crt学习率： 0.0003,训练分数-2.420，测试分数15.3分，继续训练
20250201: train_count: 260362，act学习率： 0.000243，crt学习率： 0.000243，训练分数1.03分，测试分数32.7分，继续训练
20250202: train_count: 330958，act学习率： 0.0002187，crt学习率： 0.0002187，训练分数2.90，测试分数48.7，继续训练
20250203: train_count: 398289，act学习率： 0.000177147，crt学习率： 0.000177147，训练分数3.61分，测试分数48.8分，继续训练
20250204: train_count: 471007，act学习率： 0.0001594323，crt学习率： 0.0001594323，训练分数7.133，测试分数48.8，继续训练
20250205: train_count: 500274,act学习率： 0.00014348907,crt学习率： 0.00014348907,训练分数6.12分，测试分数56.9分，继续训练
20250206：train_count: 560372，act学习率： 0.000129140163，crt学习率： 0.000129140163，训练分数8.640，测试分数88.3分，继续训练
20250207:train_count: 628702,act学习率： 0.0001162261467,crt学习率： 0.0001162261467,训练分数10分，测试分数88.3分，继续训练
20250208：train_count: 660174，act学习率： 0.00010460353203，crt学习率： 0.00010460353203，训练分数9.830，测试分数88.3分，继续训练一天
20250209:train_count: 711449,act学习率： 9.4143178827e-05,crt学习率： 9.4143178827e-05，训练分数12.327，测试分数100.7，继续训练
20250210:train_count: 757291,act学习率： 8.47288609443e-05,crt学习率： 8.47288609443e-05，训练分数26，测试分数115.8分，继续训练
20250211：train_count: 813501，act学习率： 8.47288609443e-05，crt学习率： 8.47288609443e-05，训练分数 13.286，测试分数115.8，增加学习率调度器，继续训练一天
20250212: train_count: 872079,act学习率： 8.47288609443e-05,crt学习率： 8.47288609443e-05,训练分数31.143，测试分数115.8，，继续训练
20250213: 训练分数16.375分，测试分数115.8分，停止训练，虽然分数较之前有提升，但是发现训练速度较慢，不再继续训练
'''

import os
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from typing import Any

from lib import model, common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

gym.register_envs(ale_py)  # make ALE Atari environments visible to gymnasium
ENV_ID = "ALE/Atlantis2-v5"
GAMMA = 0.99
BATCH_SIZE = 64
LEARNING_RATE = 1e-4
REPLAY_SIZE = 100000 # replay buffer capacity; kept large for training stability
REPLAY_INITIAL = 1000 # minimum buffer fill before training starts
auto_entropy = True  # NOTE(review): unused — the local `autotune` flag in __main__ controls entropy tuning
target_entropy = -2  # NOTE(review): unused — SAC.__init__ computes its own target entropy
soft_tau = 1e-2  # NOTE(review): unused — SAC takes `tau` via its constructor

TEST_ITERS = 1000  # evaluate on test_env every this many frames
SYNC_ITERS = 800  # sync target critics / write a checkpoint every this many frames

NUM_ENVS = 16  # number of parallel training environments

reward_scale = 1.  # NOTE(review): unused below

class SAC():
    """Discrete-action Soft Actor-Critic container: actor + twin critics with
    target copies, their optimizers and LR schedulers, and optional automatic
    entropy-temperature (alpha) tuning.
    """
    def __init__(self, env, actor_lr, critic_lr, alpha_lr, tau=0.95, autotune=None, alpha=None, target_entropy_scale=None, device="cpu"):
        """
        env: environment providing observation_space.shape and action_space.n
        actor_lr / critic_lr / alpha_lr: Adam learning rates
        tau: coefficient passed to ptan's alpha_sync (see soft_update)
        autotune: when not None, the temperature alpha is learned automatically
        alpha: fixed entropy temperature, used only when autotune is None
        target_entropy_scale: scales the maximum-entropy target (autotune only)
        device: torch device for all networks
        """
        super().__init__()
        self.device = device
        # Actor outputs per-action logits; each critic outputs one Q-value per action.
        self.act_net = model.SACV2Actor(env.observation_space.shape, env.action_space.n).to(device)
        self.crt1_net = model.SacCritic(env.observation_space.shape, env.action_space.n).to(device)
        self.crt2_net = model.SacCritic(env.observation_space.shape, env.action_space.n).to(device)
        # Target copies of both critics, softly synced in soft_update().
        # (An earlier comment claimed there were no target networks — there are.)
        self.target_crt1_net = ptan.agent.TargetNet(self.crt1_net)
        self.target_crt2_net = ptan.agent.TargetNet(self.crt2_net)

        self.act_opt = optim.Adam(self.act_net.parameters(), lr=actor_lr, eps=1e-4)
        self.scheduler_act = optim.lr_scheduler.StepLR(self.act_opt, step_size=50000, gamma=0.9)
        # Both critics share a single optimizer so one step updates them together.
        self.crt_opt = optim.Adam(list(self.crt1_net.parameters()) + list(self.crt2_net.parameters()), lr=critic_lr, eps=1e-4)
        self.scheduler_crt = optim.lr_scheduler.StepLR(self.crt_opt, step_size=50000, gamma=0.9)

        self.log_alpha = None
        self.a_optimizer = None
        self.autotune = autotune
        if autotune is not None:
            # Target entropy = scale * entropy of a uniform policy over n actions.
            self.target_entropy = -target_entropy_scale * torch.log(1 / torch.tensor(env.action_space.n))
            self.log_alpha = torch.zeros(1, requires_grad=True, device=device, dtype=torch.float)
            self.alpha = self.log_alpha.exp().item()
            self.a_optimizer = optim.Adam([self.log_alpha], lr=alpha_lr, eps=1e-4)
        else:
            self.alpha = alpha

        self.tau = tau
        self.device = device

    def get_action(self, states):
        """Return (log-softmax of logits, action probabilities) for a batch of states."""
        states_v = torch.as_tensor(states, dtype=torch.float32, device=self.device)
        logits = self.act_net(states_v)
        action_dist = torch.distributions.Categorical(logits=logits)
        return F.log_softmax(logits, dim=-1), action_dist.probs


    def soft_update(self):
        """Softly sync the target critics toward the online critics.

        ptan's alpha_sync keeps a `tau` fraction of the TARGET weights:
        target = tau * target + (1 - tau) * online.
        NOTE(review): __main__ constructs SAC with tau=0.005, which makes the
        target track the online net almost exactly on every sync — confirm this
        polarity is intended (classic SAC uses tau as the ONLINE fraction).
        """
        self.target_crt1_net.alpha_sync(self.tau)
        self.target_crt2_net.alpha_sync(self.tau)


def test_net(net, env, count=10, device="cpu"):
    """Play `count` full episodes greedily and report averages.

    count: number of episodes to play (each runs until termination/truncation)

    return: (mean episode reward, mean episode length)
    """
    total_reward = 0.0
    total_steps = 0
    with torch.no_grad():
        for _ in range(count):
            obs, _ = env.reset()
            done = trunc = False
            while not (done or trunc):
                obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
                # Greedy action: argmax over the actor's outputs.
                action = np.argmax(net(obs_v).cpu().numpy())
                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                total_steps += 1
    return total_reward / count, total_steps / count


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Action 1 must be FIRE, and such games expose at least 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset, then press FIRE (action 1) and UP (action 2) to start the game.

        If either press accidentally terminates the episode, reset again.
        Bug fix: the original discarded the observation returned by those extra
        resets and could return a stale obs/info from a terminated episode.
        """
        obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward shaping: compresses large raw rewards and penalizes losing a life.

    NOTE(review): `frame_penalty` is stored but never applied in step() —
    confirm whether a per-frame penalty was meant to be added.
    """
    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty  # per-frame penalty (currently unused)
        self.life_loss_penalty = life_loss_penalty  # added to reward on life loss (negative)
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)  # remember initial life count
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # No global reward scaling: the base hit reward is only 1-2 points.
        # Compress large rewards: floor-divide anything above 10 by 10, then
        # bump results below 2 up to 3.
        if reward > 10:
            reward //= 10
            if reward < 2:
                reward = 3

        # Penalize the agent when it loses a life.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives
        elif current_lives > self.previous_lives:
            self.previous_lives = current_lives
            # NOTE(review): subtracting the (negative) penalty grants a +10
            # bonus when a life is gained — confirm this is intended.
            reward -= self.life_loss_penalty

        return obs, reward, done, truncated, info


def wrap_dqn(env_id, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Return a thunk that builds a fully wrapped Atari environment.

    env_id: gymnasium environment id to create.
    stack_frames: number of consecutive frames stacked into one observation.
    episodic_life: if True, treat each lost life as an episode end (training aid).
    reward_clipping: accepted for API compatibility; reward shaping is done by
        RewardPenaltyWrapper instead, so this flag is currently ignored.
    """
    def thunk():
        # Bug fix: the previous version ignored `env_id` and always created the
        # global ENV_ID. Callers pass ENV_ID, so behavior is unchanged for them.
        env = gym.make(env_id, obs_type='rgb', frameskip=1, repeat_action_probability=0.0)
        if episodic_life:
            # Simulate a single-life game for multi-life games.
            env = ptan.common.wrappers.EpisodicLifeEnv(env)
        # Randomized no-op starts, then frame skipping with max-pooling.
        env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)
        env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=4)

        if 'FIRE' in env.unwrapped.get_action_meanings():
            env = FireResetEnv(env)
        env = common.ProcessFrame84(env)
        env = ptan.common.wrappers.ImageToPyTorch(env)
        env = ptan.common.wrappers.FrameStack(env, stack_frames)
        env = RewardPenaltyWrapper(env)

        return env
    return thunk

class PolicyAgent(ptan.agent.BaseAgent):
    """
    Policy agent: gets action probabilities from the SAC actor and samples
    actions from the resulting categorical distribution.
    """
    def __init__(self, sac, preprocessor=ptan.agent.default_states_preprocessor, device="cpu"):
        '''
            sac: SAC container whose get_action() returns (log-probs, probs)
            preprocessor: converts raw states to a tensor (e.g. to float32)
            device: device tensors are moved to before inference
        '''
        self.sac = sac
        self.preprocessor = preprocessor
        self.device = device

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """
        Return actions for the given batch of states.
        :param states: list of states; agent_states takes no part in the
            computation, it is only kept the same length as states
        :return: (array of sampled actions, agent_states)
        """
        if agent_states is None:
            agent_states = [None] * len(states)
        # Apply the preprocessor if one was configured.
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)
        # Bug fix: the original referenced the global `sac` instead of
        # `self.sac`, and called .cpu() on the (log_pi, probs) tuple returned
        # by get_action(), which would raise. Sample from the probabilities.
        _, probs = self.sac.get_action(states)
        actions = torch.distributions.Categorical(probs=probs).sample()
        return actions.cpu().numpy(), agent_states
    

def select_device():
    """Pick the best available torch device: CUDA first, then Apple MPS, else CPU."""
    for name, is_available in (("cuda", torch.cuda.is_available),
                               ("mps", torch.backends.mps.is_available)):
        if is_available():
            return torch.device(name)
    return torch.device("cpu")



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): with action='store_true' and default=True this flag is
    # always True; device selection below ignores it anyway.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default="atlantis2", help="Name of the run")
    args = parser.parse_args()
    autotune = True
    device = select_device()

    save_path = os.path.join("saves", "sac-q-v2-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # Parallel training envs (episodic life on) and one evaluation env (full lives).
    env = gym.vector.SyncVectorEnv([wrap_dqn(ENV_ID, episodic_life=True) for _ in range(NUM_ENVS)])
    test_env = wrap_dqn(ENV_ID, episodic_life=False)()

    # Build the actor and the twin critic networks.
    sac = SAC(test_env, device=device, actor_lr=3e-4, critic_lr=3e-4, alpha_lr=3e-4, autotune=autotune, target_entropy_scale=0.89, alpha=0.2, tau=0.005)
    print(sac.act_net)
    print(sac.crt1_net)
    print(sac.crt2_net)

    frame_idx = 0
    train_count = 0
    agent = ptan.agent.PolicyAgent(sac.act_net, device=device, apply_softmax=True)
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Resume from the newest "epoch" checkpoint found in save_path.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        sac.act_net.load_state_dict(checkpoint["act_net"])
        sac.crt1_net.load_state_dict(checkpoint["crt1_net"])
        sac.crt2_net.load_state_dict(checkpoint["crt2_net"])
        sac.target_crt1_net.target_model.load_state_dict(checkpoint["target_crt1_net"])
        sac.target_crt2_net.target_model.load_state_dict(checkpoint["target_crt2_net"])
        sac.act_opt.load_state_dict(checkpoint["act_opt"])
        sac.scheduler_act.load_state_dict(checkpoint["scheduler_act"])
        sac.crt_opt.load_state_dict(checkpoint["crt_opt"])
        sac.scheduler_crt.load_state_dict(checkpoint["scheduler_crt"])
        if sac.autotune:
            sac.log_alpha = checkpoint["log_alpha"]
            sac.a_optimizer.load_state_dict(checkpoint["log_alpha_opt"])
        frame_idx = checkpoint["frame_idx"]
        train_count = checkpoint["train_count"]
        print("加载模型成功")
        print("train_count:", train_count)
        print("act学习率：", sac.act_opt.param_groups[0]['lr'])
        print("crt学习率：", sac.crt_opt.param_groups[0]['lr'])

    writer = SummaryWriter(comment="-sac-q-v2-" + args.name)
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA, steps_count=1, vectorized=True)
    buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=REPLAY_SIZE)

    best_reward = None
    with ptan.common.utils.RewardTracker(writer) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
            while True:
                frame_idx += 1
                # Collect one step of experience from the vectorized envs.
                buffer.populate(1)
                # Pull any finished-episode statistics collected so far.
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    # Log progress; the tracker also reports the running mean reward.
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", steps[0], frame_idx)
                    tracker.reward(rewards[0], frame_idx)

                if len(buffer) < REPLAY_INITIAL:
                    continue

                # Sample a training batch from the replay buffer.
                batch = buffer.sample(BATCH_SIZE)
                with torch.no_grad():
                    states_v, actions_v, rewards_v, dones_mask, last_states_v = common.unpack_batch_sac_q(batch, device)

                    # Soft state-value of the next state under the current policy,
                    # evaluated with the target critics (clipped double-Q).
                    last_state_log_pi, last_state_action_probs = sac.get_action(last_states_v)
                    q1_v = sac.target_crt1_net.target_model(last_states_v)
                    q2_v = sac.target_crt2_net.target_model(last_states_v)
                    min_v = torch.sum(last_state_action_probs * (torch.min(q1_v, q2_v) - sac.alpha * last_state_log_pi), dim=1)

                    min_v[dones_mask] = 0.0  # terminal states bootstrap to zero
                    pred_q_v = rewards_v.unsqueeze(dim=-1) + min_v.unsqueeze(dim=-1) * GAMMA
                # Train critics: MSE between Q(s, a) and the Bellman target above.
                sac.crt_opt.zero_grad()
                # Q-values of the actions actually taken in the sampled transitions.
                q1_v = sac.crt1_net(states_v).gather(1, actions_v.unsqueeze(dim=1))
                q2_v = sac.crt2_net(states_v).gather(1, actions_v.unsqueeze(dim=1))
                critic_1_loss_v = F.mse_loss(q1_v, pred_q_v)
                critic_2_loss_v = F.mse_loss(q2_v, pred_q_v)
                tb_tracker.track("loss_critic_1", critic_1_loss_v, frame_idx)
                tb_tracker.track("loss_critic_2", critic_2_loss_v, frame_idx)

                critic_loss_v = critic_1_loss_v + critic_2_loss_v
                critic_loss_v.backward()
                sac.crt_opt.step()
                sac.scheduler_crt.step()

                tb_tracker.track("critic_ref", pred_q_v.mean(), frame_idx)

                # Train actor: minimize E[ pi * (alpha*log_pi - min Q) ].
                sac.act_opt.zero_grad()
                log_pi, action_probs = sac.get_action(states_v)

                with torch.no_grad():
                    q1_v = sac.crt1_net(states_v)
                    q2_v = sac.crt2_net(states_v)
                    min_q_v = torch.min(q1_v, q2_v)
                actor_loss_v = torch.mean(action_probs * ((sac.alpha * log_pi) - min_q_v))
                actor_loss_v.backward()
                sac.act_opt.step()
                tb_tracker.track("loss_actor", actor_loss_v, frame_idx)
                sac.scheduler_act.step()

                if autotune:
                    # re-use action probabilities for temperature loss
                    alpha_loss = (action_probs.detach() * (-sac.log_alpha.exp() * (log_pi + sac.target_entropy).detach())).mean()
                    sac.a_optimizer.zero_grad()
                    alpha_loss.backward()
                    sac.a_optimizer.step()
                    sac.alpha = sac.log_alpha.exp().item()

                train_count += 1
                # Bug fix: the original tested `frame_idx % SYNC_ITERS` (truthy on
                # 799 of every 800 frames), so the target nets were synced and a
                # checkpoint was written on almost every step instead of every
                # SYNC_ITERS frames (compare the `% TEST_ITERS == 0` check below).
                if frame_idx % SYNC_ITERS == 0:
                    sac.soft_update()

                    checkpoint = {
                        "act_net": sac.act_net.state_dict(),
                        "crt1_net": sac.crt1_net.state_dict(),
                        "crt2_net": sac.crt2_net.state_dict(),
                        "frame_idx": frame_idx,
                        "train_count": train_count,
                        "best_reward": best_reward,
                        "target_crt1_net": sac.target_crt1_net.target_model.state_dict(),
                        "target_crt2_net": sac.target_crt2_net.target_model.state_dict(),
                        "act_opt": sac.act_opt.state_dict(),
                        "scheduler_act": sac.scheduler_act.state_dict(),
                        "crt_opt": sac.crt_opt.state_dict(),
                        "scheduler_crt": sac.scheduler_crt.state_dict(),
                    }
                    if autotune:
                        checkpoint.update({"log_alpha": sac.log_alpha, "log_alpha_opt": sac.a_optimizer.state_dict()})

                    common.save_checkpoints(train_count, checkpoint, save_path, "sac-v2", keep_last=5)


                if frame_idx % TEST_ITERS == 0:
                    # Evaluate and track the best test reward seen so far.
                    ts = time.time()
                    rewards, steps = test_net(sac.act_net, test_env, device=device)
                    print("Test done in %.2f sec, reward %.3f, steps %d" % (
                        time.time() - ts, rewards, steps))
                    writer.add_scalar("test_reward", rewards, frame_idx)
                    writer.add_scalar("test_steps", steps, frame_idx)

                    if best_reward is None or best_reward < rewards:
                        if best_reward is not None:
                            print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                        best_reward = rewards
                    # NOTE(review): `checkpoint` only exists after the first
                    # SYNC_ITERS save; since SYNC_ITERS < TEST_ITERS and training
                    # starts well before frame SYNC_ITERS, this holds in practice.
                    common.save_best_model(rewards, checkpoint, save_path, f"sac-v2-best-{train_count}", keep_best=10)
