#!/usr/bin/env python3
'''
未验证，在CartPole-v1上验证通过，所以需要考虑调整超参数
参考链接：https://blog.csdn.net/dgvv4/article/details/129558965
https://blog.csdn.net/qq_44389347/article/details/138604646

20241116: 经过一天的训练，训练分数一直没有提升，代码有问题
20241127：调整模型结构
在cloudstudio上重新训练
20241128：训练分数-59分，毫无提升，代码应该是有问题
20250127: 完成了代码的调整，计划重新进行训练,总体上并无有效的更改，其中会影响到训练的因素有多步探索改为1步和增加了调度器，其余并无实际改变，训练试试
在2号机上训练
20250203: 训练分数-7.750,测试分数-54分，继续训练
20250204: Learning rate: 0.000129140163，Learning rate: 0.000129140163，Learning rate: 0.000129140163，训练分数-7.530，测试分数-53分，继续训练
20250205: Learning rate: 8.47288609443e-05,Learning rate: 8.47288609443e-05,Learning rate: 8.47288609443e-05，训练分数-7.830，测试分数-53分，无进步，对比carpole的差异，调整代码
'''

import os
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from typing import Any

from lib import model, common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

# Register the ALE (Atari) environments with gymnasium so gym.make can find them.
gym.register_envs(ale_py)
ENV_ID = "ALE/Atlantis2-v5"
GAMMA = 0.99
BATCH_SIZE = 64
LEARNING_RATE = 1e-4  # NOTE(review): appears unused here; SAC() below takes its own lr args
REPLAY_SIZE = 100000 # replay buffer capacity; kept large for training stability
REPLAY_INITIAL = 1000 # minimum number of stored transitions before training starts
auto_entropy = True   # NOTE(review): not referenced in this file — confirm whether dead
target_entropy = -2   # NOTE(review): shadowed by target_entropy=-1 passed to SAC() below
soft_tau = 1e-2       # NOTE(review): not referenced here; SAC() is built with tau=0.995

TEST_ITERS = 1000  # run an evaluation every this many training frames
NUM_ENVS = 16      # number of parallel training environments

reward_scale = 1.  # NOTE(review): not referenced in this file — confirm whether dead

class SAC():
    """Container bundling the discrete-SAC networks, optimizers and LR schedulers.

    Holds one actor, two critics (clipped double-Q) each with a soft-updated
    target copy, plus a learnable entropy temperature (``log_alpha``).
    """
    def __init__(self, env, actor_lr, critic_lr, alpha_lr, target_entropy, tau, gamma,  device="cpu"):
        """
        env: environment used only for its observation/action space shapes
        actor_lr / critic_lr / alpha_lr: learning rates for the three optimizers
        target_entropy: entropy target used by the temperature (alpha) loss
        tau: soft-update coefficient passed to ptan's TargetNet.alpha_sync
        gamma: discount factor (stored but the training loop uses the global GAMMA)
        """
        super().__init__()
        self.device = device
        self.act_net = model.SACActor(env.observation_space.shape, env.action_space.n).to(device)
        self.crt1_net = model.SacCritic(env.observation_space.shape, env.action_space.n).to(device)
        self.crt2_net = model.SacCritic(env.observation_space.shape, env.action_space.n).to(device)
        # Each critic gets a soft-updated target copy; unlike TD3 there is no
        # target copy of the actor (only the critics are mirrored).
        self.target_crt1_net = ptan.agent.TargetNet(self.crt1_net)
        self.target_crt2_net = ptan.agent.TargetNet(self.crt2_net)

        self.act_opt = optim.Adam(self.act_net.parameters(), lr=actor_lr)
        self.act_scheduler = optim.lr_scheduler.StepLR(self.act_opt, step_size=50000, gamma=0.9)
        self.crt1_opt = optim.Adam(self.crt1_net.parameters(), lr=critic_lr)
        self.crt1_scheduler = optim.lr_scheduler.StepLR(self.crt1_opt, step_size=50000, gamma=0.9)
        self.crt2_opt = optim.Adam(self.crt2_net.parameters(), lr=critic_lr)
        self.crt2_scheduler = optim.lr_scheduler.StepLR(self.crt2_opt, step_size=50000, gamma=0.9)

        # Learnable log of the entropy temperature; initial alpha = 0.01.
        self.log_alpha = torch.tensor(np.log(0.01), dtype=torch.float).to(device)
        self.log_alpha.requires_grad = True
        self.log_alpha_optimizer = optim.Adam([self.log_alpha], lr=alpha_lr)
        self.log_alpha_scheduler = optim.lr_scheduler.StepLR(self.log_alpha_optimizer, step_size=50000, gamma=0.9)

        self.target_entropy = target_entropy
        self.gamma = gamma
        self.tau = tau
        self.device = device

    def get_action(self, states):
        """Sample one action for a single state from the actor's categorical policy.

        NOTE(review): `.item()` restricts this to a single state, and the call is
        not wrapped in `torch.no_grad()`, so the forward pass tracks gradients —
        confirm whether that is intended.
        """
        states_v = torch.tensor(states, dtype=torch.float32).to(self.device)
        probs = self.act_net(states_v)
        action_dist = torch.distributions.Categorical(probs=probs)
        action = action_dist.sample().item()
        return action


    def soft_update(self):
        # Polyak-average the online critics into their target copies.
        # NOTE(review): ptan's alpha_sync keeps `alpha` fraction of the TARGET
        # weights (tgt = alpha*tgt + (1-alpha)*src), so tau=0.995 is a slow
        # update — confirm this matches the intended direction.
        self.target_crt1_net.alpha_sync(self.tau)
        self.target_crt2_net.alpha_sync(self.tau)


def test_net(net, env, count=10, device="cpu"):
    """Play `count` full episodes greedily (argmax over action probabilities).

    net: actor network mapping a preprocessed state batch to action probabilities
    env: a (non-vectorized) gymnasium environment
    count: number of complete episodes to play
    Returns a tuple (mean episode reward, mean episode length).
    """
    total_reward = 0.0
    total_steps = 0
    with torch.no_grad():
        for _ in range(count):
            obs, _ = env.reset()
            episode_over = False
            while not episode_over:
                obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
                action = np.argmax(net(obs_v).cpu().numpy())
                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                total_steps += 1
                episode_over = done or trunc
    return total_reward / count, total_steps / count


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user needs to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Only games that actually have a FIRE action (at index 1) and at least
        # three actions in total are eligible for this wrapper.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Pass-through: only reset behavior is customized.
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset and press FIRE (action 1), then action 2, to kick the game off.

        If either press accidentally ends the episode, the env is reset again.
        Returns the observation/info that a caller of reset() expects.
        """
        obs, info = self.env.reset(seed=seed, options=options)
        # BUGFIX: the original ignored `trunc` and, when step(2) ended the
        # episode, reset the env but still returned the stale terminal obs/info.
        obs, _, done, trunc, info = self.env.step(1)
        if done or trunc:
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, trunc, info = self.env.step(2)
        if done or trunc:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info


def wrap_dqn(ENV_ID, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Return a thunk that builds one fully wrapped Atari env (for SyncVectorEnv).

    ENV_ID: gymnasium environment id (kept uppercase — callers pass it by keyword)
    stack_frames: number of consecutive frames stacked into one observation
    episodic_life: if True, a lost life is treated as end-of-episode
    reward_clipping: NOTE(review) — accepted but never used below;
        RewardPenaltyWrapper is applied unconditionally. Confirm intent.
    """
    def thunk():
        env = gym.make(ENV_ID, obs_type='rgb', frameskip=4, repeat_action_probability=0.0)
        if episodic_life:
            # Treat a multi-life game as single-life episodes to densify the signal.
            env = ptan.common.wrappers.EpisodicLifeEnv(env)
        # Randomize the initial state with up to 30 no-op actions on reset.
        env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

        if 'FIRE' in env.unwrapped.get_action_meanings():
            env = FireResetEnv(env)
        env = common.ProcessFrame84(env)
        env = ptan.common.wrappers.ImageToPyTorch(env)
        env = ptan.common.wrappers.FrameStack(env, stack_frames)
        env = common.RewardPenaltyWrapper(env)

        return env
    return thunk

class PolicyAgent(ptan.agent.BaseAgent):
    """
    Policy agent: queries the SAC actor for action probabilities and samples
    one action per state from the resulting categorical distribution.
    """
    def __init__(self, sac, preprocessor=ptan.agent.default_states_preprocessor, device="cpu"):
        '''
        sac: SAC container whose `act_net` maps states to action probabilities
        preprocessor: converts a list of raw states to a float tensor
        device: device the state tensor is moved to before inference
        '''
        self.sac = sac
        self.preprocessor = preprocessor
        self.device = device

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """
        Return actions for the given batch of states.
        :param states: list of states; agent_states is passed through untouched
        :return: (numpy array of sampled actions, agent_states)
        """
        if agent_states is None:
            agent_states = [None] * len(states)
        # Run the preprocessor (e.g. convert to float32 tensor) if one is set.
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)
        # BUGFIX: the original referenced the module-level `sac` instead of
        # `self.sac`, and called `.cpu().numpy()` on `get_action`'s plain-int
        # result (which would raise AttributeError). Sample directly from the
        # actor instead, which also handles the whole batch at once.
        probs_v = self.sac.act_net(states)
        dist = torch.distributions.Categorical(probs=probs_v)
        actions = dist.sample().cpu().numpy()
        return actions, agent_states


def select_device(args):
    """Pick the compute device: CUDA if requested and present, else Apple MPS,
    falling back to CPU in every other case."""
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): with action='store_true' and default=True this flag is
    # always True — it cannot be disabled from the command line. Confirm intent.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    device = select_device(args)

    save_path = os.path.join("saves", "sac-q-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # Vectorized training envs (episodic-life on) and one evaluation env (off).
    env = gym.vector.SyncVectorEnv([wrap_dqn(ENV_ID=ENV_ID, episodic_life=True) for _ in range(NUM_ENVS)])
    test_env = wrap_dqn(ENV_ID=ENV_ID, episodic_life=False)()

    # Build the actor network and the twin critic networks.
    sac = SAC(test_env, device=device, actor_lr=3e-4, critic_lr=3e-4, alpha_lr=3e-4, target_entropy=-1, tau=0.995, gamma=0.9)
    print(sac.act_net)
    print(sac.crt1_net)
    print(sac.crt2_net)

    frame_idx = 0
    agent = ptan.agent.PolicyAgent(sac.act_net, device=device)
    # agent = PolicyAgent(sac, device=device)
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Resume training: pick the newest "epoch" checkpoint, sorted by the
        # frame index embedded in the filename (third underscore-separated field).
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        sac.act_net.load_state_dict(checkpoint["act_net"])
        sac.crt1_net.load_state_dict(checkpoint["crt1_net"])
        sac.crt2_net.load_state_dict(checkpoint["crt2_net"])
        sac.target_crt1_net.target_model.load_state_dict(checkpoint["target_crt1_net"])
        sac.target_crt2_net.target_model.load_state_dict(checkpoint["target_crt2_net"])
        sac.act_opt.load_state_dict(checkpoint["act_opt"])
        sac.crt1_opt.load_state_dict(checkpoint["crt1_opt"])
        sac.crt2_opt.load_state_dict(checkpoint["crt2_opt"])
        sac.act_scheduler.load_state_dict(checkpoint["act_scheduler"])
        sac.crt1_scheduler.load_state_dict(checkpoint["crt1_scheduler"])
        sac.crt2_scheduler.load_state_dict(checkpoint["crt2_scheduler"])
        sac.log_alpha_scheduler.load_state_dict(checkpoint["log_alpha_scheduler"])
        sac.log_alpha = checkpoint["log_alpha"]
        sac.log_alpha_optimizer.load_state_dict(checkpoint["log_alpha_opt"])
        frame_idx = checkpoint["frame_idx"]
        print("加载模型成功")
        # Show the current learning rates after the schedulers were restored.
        print(f"Learning rate: {sac.act_opt.param_groups[0]['lr']}")
        print(f"Learning rate: {sac.crt1_opt.param_groups[0]['lr']}")
        print(f"Learning rate: {sac.crt2_opt.param_groups[0]['lr']}")

    writer = SummaryWriter(comment="-sac-q_" + args.name)
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA, steps_count=1, vectorized=True)
    buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=REPLAY_SIZE)

    best_reward = None
    with ptan.common.utils.RewardTracker(writer) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
            while True:
                frame_idx += 1
                # Advance the experience source by one step and store the
                # resulting transition(s) in the replay buffer.
                buffer.populate(1)
                # Pop the (reward, steps) records of any episodes finished so far.
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    # Log training progress (RewardTracker also prints speed/means).
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", steps[0], frame_idx)
                    tracker.reward(rewards[0], frame_idx)

                # Wait until the buffer holds enough transitions before training.
                if len(buffer) < REPLAY_INITIAL:
                    continue

                # Sample a training batch from the replay buffer.
                batch = buffer.sample(BATCH_SIZE)
                states_v, actions_v, rewards_v, dones_mask, last_states_v = common.unpack_batch_sac_q(batch, device)

                # Bellman target: expected min-Q of the next state under the
                # current policy, plus the entropy bonus scaled by alpha.
                pred_next_probs = sac.act_net(last_states_v)
                pred_next_log_probs = torch.log(pred_next_probs + 1e-8)
                entropy = -torch.sum(pred_next_probs * pred_next_log_probs, dim=1, keepdim=True)
                q1_v = sac.target_crt1_net.target_model(last_states_v)
                q2_v = sac.target_crt2_net.target_model(last_states_v)
                # Expectation over actions of the element-wise min of the twin target Qs.
                min_v = torch.sum(pred_next_probs * torch.min(q1_v, q2_v), dim=1, keepdim=True)
                pred_next_v = min_v + sac.log_alpha.exp() * entropy
                # Terminal states contribute no bootstrapped value.
                pred_next_v[dones_mask.bool()] = 0.0
                pred_next_q_v = rewards_v.unsqueeze(dim=-1) + pred_next_v * GAMMA

                # Train critic 1: MSE between its Q for the taken action and the
                # (detached) Bellman target — same scheme as standard Q-learning.
                sac.crt1_opt.zero_grad()
                # Q-values of the actions that were actually executed.
                q_v = sac.crt1_net(states_v).gather(1, actions_v.unsqueeze(dim=1))
                critic_1_loss_v = torch.mean(F.mse_loss(q_v, pred_next_q_v.detach()))
                critic_1_loss_v.backward()
                sac.crt1_opt.step()
                # sac.crt1_scheduler.step()
                tb_tracker.track("loss_critic_1", critic_1_loss_v, frame_idx)

                # Train critic 2 on the same target.
                sac.crt2_opt.zero_grad()
                q_v = sac.crt2_net(states_v).gather(1, actions_v.unsqueeze(dim=1))
                critic_2_loss_v = torch.mean(F.mse_loss(q_v, pred_next_q_v.detach()))
                critic_2_loss_v.backward()
                sac.crt2_opt.step()
                # sac.crt2_scheduler.step()
                tb_tracker.track("loss_critic_2", critic_2_loss_v, frame_idx)
                tb_tracker.track("critic_ref", pred_next_q_v.mean(), frame_idx)

                # Train the actor: maximize expected min-Q plus alpha-weighted entropy.
                sac.act_opt.zero_grad()
                probs = sac.act_net(states_v)
                log_probs = torch.log(probs + 1e-8)
                entropy = -torch.sum(probs * log_probs, dim=1, keepdim=True)

                q1_v = sac.crt1_net(states_v)
                q2_v = sac.crt2_net(states_v)
                # Use the expected (policy-weighted) min-Q as the value estimate.
                min_q_v = torch.sum(probs * torch.min(q1_v, q2_v), dim=1, keepdim=True)
                actor_loss_v = torch.mean(-sac.log_alpha.exp() * entropy - min_q_v)
                # # Alternative, more standard SAC actor loss (kept for reference):
                # min_q_current = torch.min(q1_v, q2_v)
                # actor_loss_v = torch.mean(torch.sum(probs * (sac.log_alpha.exp() * log_probs - min_q_current), dim=1))
                actor_loss_v.backward()
                sac.act_opt.step()
                # sac.act_scheduler.step()
                tb_tracker.track("loss_actor", actor_loss_v, frame_idx)

                # Adapt the entropy temperature alpha toward target_entropy.
                sac.log_alpha_optimizer.zero_grad()
                alpha_loss_v = torch.mean((entropy - sac.target_entropy).detach() * sac.log_alpha.exp())
                alpha_loss_v.backward()
                sac.log_alpha_optimizer.step()
                sac.log_alpha_scheduler.step()

                # Polyak-update the target critics.
                sac.soft_update()


                if frame_idx % TEST_ITERS == 0:
                    # Periodic evaluation; checkpoint and track the best reward.
                    ts = time.time()
                    rewards, steps = test_net(sac.act_net, test_env, device=device)
                    print("Test done in %.2f sec, reward %.3f, steps %d" % (
                        time.time() - ts, rewards, steps))
                    writer.add_scalar("test_reward", rewards, frame_idx)
                    writer.add_scalar("test_steps", steps, frame_idx)
                    checkpoint = {
                        "act_net": sac.act_net.state_dict(),
                        "crt1_net": sac.crt1_net.state_dict(),
                        "crt2_net": sac.crt2_net.state_dict(),
                        "frame_idx": frame_idx,
                        "best_reward": best_reward,
                        "target_crt1_net": sac.target_crt1_net.target_model.state_dict(),
                        "target_crt2_net": sac.target_crt2_net.target_model.state_dict(),
                        "act_opt": sac.act_opt.state_dict(),
                        "crt1_opt": sac.crt1_opt.state_dict(),
                        "crt2_opt": sac.crt2_opt.state_dict(),
                        "act_scheduler": sac.act_scheduler.state_dict(),
                        "crt1_scheduler": sac.crt1_scheduler.state_dict(),
                        "crt2_scheduler": sac.crt2_scheduler.state_dict(),
                        "log_alpha": sac.log_alpha,
                        "log_alpha_opt": sac.log_alpha_optimizer.state_dict(),
                        "log_alpha_scheduler": sac.log_alpha_scheduler.state_dict(),
                    }
                    common.save_checkpoints(frame_idx, checkpoint, save_path, "sac", keep_last=5)
                    if best_reward is None or best_reward < rewards:
                        if best_reward is not None:
                            print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                        best_reward = rewards
                    common.save_best_model(rewards, checkpoint, save_path, f"sac-best-{frame_idx}", keep_best=10)
    pass
