#!/usr/bin/env python3
'''
完成适配，由于和train_sac_q.py逻辑一致，所以考虑到train_sac_q.py
参考链接：https://blog.csdn.net/dgvv4/article/details/129558965
https://blog.csdn.net/qq_44389347/article/details/138604646

todo 对比和td3的不同

训练记录：
20241114：训练了大概5~6小时，训练分数稳定在-59分一直没有上涨，测试分数也一直维持在-59分，训练效果有问题，停止训练，调整代码
20241115：虽然调整了代码，但是貌似依旧无法提升分数，代码结构有问题？
20241127：调整模型结构
20250611：完成模型调整
20250709：开始训练，在2号机上训练，训练不通过，待调整到更简单的环境下验证算法有效性
'''

import os
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from typing import Any

from lib import model, common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

gym.register_envs(ale_py)
ENV_ID = "ALE/Atlantis2-v5"
GAMMA = 0.99
BATCH_SIZE = 64
LEARNING_RATE = 1e-4
REPLAY_SIZE = 100000 # 重放缓冲区长度，这么长是为了提高稳定性
REPLAY_INITIAL = 10000 # 重放缓冲区初始化大小
auto_entropy = True
# target_entropy = -2
soft_tau = 1e-2

TEST_ITERS = 1000

reward_scale = 1.

class SAC():
    """Discrete-action Soft Actor-Critic agent container.

    Holds the stochastic actor, twin critics with Polyak-averaged target
    copies (TD3-style clipped double-Q), their optimizers, and a learnable
    temperature (alpha) for automatic entropy tuning.
    """

    def __init__(self, env, actor_lr, critic_lr, alpha_lr, target_entropy, tau, gamma,  device="cpu"):
        """
        Args:
            env: environment exposing ``observation_space.shape`` and a
                discrete ``action_space.n``.
            actor_lr / critic_lr / alpha_lr: Adam learning rates for the
                actor, the two critics, and the temperature respectively.
            target_entropy: entropy level the automatic temperature tuning
                drives the policy towards.
            tau: coefficient forwarded to ptan's ``TargetNet.alpha_sync``
                for the soft target update (per ptan's convention the
                target keeps a ``tau`` fraction of itself, so values close
                to 1 mean slow tracking).
            gamma: discount factor (stored here; NOTE(review) the training
                loop in this file uses the module-level GAMMA instead).
            device: torch device for all networks.
        """
        super().__init__()
        self.device = device
        self.act_net = model.SACActor(env.observation_space.shape, env.action_space.n).to(device)
        self.crt1_net = model.SacCritic(env.observation_space.shape, env.action_space.n).to(device)
        self.crt2_net = model.SacCritic(env.observation_space.shape, env.action_space.n).to(device)
        # Unlike some SAC write-ups, this implementation DOES keep target
        # copies of both critics (as TD3 does) and updates them softly via
        # soft_update().  (The original comment here claimed the opposite.)
        self.target_crt1_net = ptan.agent.TargetNet(self.crt1_net)
        self.target_crt2_net = ptan.agent.TargetNet(self.crt2_net)

        self.act_opt = optim.Adam(self.act_net.parameters(), lr=actor_lr)
        self.crt1_opt = optim.Adam(self.crt1_net.parameters(), lr=critic_lr)
        self.crt2_opt = optim.Adam(self.crt2_net.parameters(), lr=critic_lr)

        # Learn log(alpha) rather than alpha itself so the temperature stays
        # positive; initialised to alpha = 0.01.
        self.log_alpha = torch.tensor(np.log(0.01), dtype=torch.float).to(device)
        self.log_alpha.requires_grad = True
        self.log_alpha_optimizer = optim.Adam([self.log_alpha], lr=alpha_lr)

        self.target_entropy = target_entropy
        self.gamma = gamma
        self.tau = tau

    @torch.no_grad()
    def get_action(self, states):
        """Sample one action index from the current policy.

        Wrapped in no_grad: action selection never needs gradients, so this
        avoids building an autograd graph on every environment step.

        NOTE(review): ``.item()`` assumes ``states`` is a batch of exactly
        one observation; larger batches would raise.
        """
        states_v = torch.tensor(states, dtype=torch.float32).to(self.device)
        probs = self.act_net(states_v)
        action_dist = torch.distributions.Categorical(probs=probs)
        action = action_dist.sample().item()
        return action

    def soft_update(self):
        """Polyak-update both target critics towards the online critics."""
        self.target_crt1_net.alpha_sync(self.tau)
        self.target_crt2_net.alpha_sync(self.tau)


def test_net(net, env, count=10, device="cpu"):
    """Play ``count`` full episodes greedily and report averages.

    Actions are picked by argmax over the policy's output probabilities
    (no sampling), so this evaluates the deterministic policy.

    Returns:
        (mean episode reward, mean episode length)
    """
    total_reward = 0.0
    total_steps = 0
    with torch.no_grad():
        for _ in range(count):
            obs, _ = env.reset()
            episode_over = False
            while not episode_over:
                obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
                act_probs = net(obs_v).cpu().numpy()
                greedy_action = np.argmax(act_probs)
                obs, reward, done, trunc, _ = env.step(greedy_action)
                total_reward += reward
                total_steps += 1
                episode_over = done or trunc
    return total_reward / count, total_steps / count


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user needs to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Games that expose a FIRE action always have at least three actions,
        # so these asserts double-check the wrapper is applicable.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Pass-through: only reset() is customised by this wrapper.
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset the env and press the start buttons (actions 1 and 2).

        We don't know in advance which button actually starts the game, so
        both are tried; if a press accidentally ends the episode, the env
        is reset again.

        Bug fixed: the original returned the observation from the already
        finished episode when a button press terminated it (it re-reset the
        env but kept the stale ``obs``), and it ignored truncation.  Now a
        fresh observation/info pair is taken from the final reset.
        """
        self.env.reset(seed=seed, options=options)
        obs, _, done, trunc, info = self.env.step(1)
        if done or trunc:
            self.env.reset(seed=seed, options=options)
        obs, _, done, trunc, info = self.env.step(2)
        if done or trunc:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info


class RewardPenaltyWrapper(gym.Wrapper):
    """Scales raw rewards down by 100 and penalises losing a life.

    NOTE(review): ``frame_penalty`` is accepted and stored but never
    applied anywhere in this class — confirm whether a per-frame penalty
    was intended.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so step() can detect a loss.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        reward /= 100  # scale raw Atari scores into a smaller range

        # Apply the penalty once per life lost.
        lives_now = info.get('lives', self.previous_lives)
        if lives_now < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = lives_now

        return obs, reward, done, truncated, info


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    '''
    Apply the standard Atari preprocessing wrapper stack to ``env``.

    stack_frames: number of consecutive frames stacked into one observation.
    episodic_life: treat each lost life as the end of an episode.
    reward_clipping: NOTE(review) accepted but never used anywhere in this
        function — confirm whether a reward-clipping wrapper was intended.
    '''
    if episodic_life:
        # Treat a multi-life game as a sequence of single-life episodes.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomised number of no-op actions on reset for varied starting states.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = ptan.common.wrappers.ProcessFrame84(env)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = ptan.common.wrappers.FrameStack(env, stack_frames)
    env = RewardPenaltyWrapper(env)
    return env


class PolicyAgent(ptan.agent.BaseAgent):
    """
    Policy agent: gets action probabilities from the SAC actor and samples
    one action per state.
    """
    def __init__(self, sac, preprocessor=ptan.agent.default_states_preprocessor, device="cpu"):
        '''
        sac: SAC container whose ``act_net`` maps states to action probabilities
        preprocessor: converts raw states into a float tensor batch
        device: torch device inference runs on
        '''
        self.sac = sac
        self.preprocessor = preprocessor
        self.device = device

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """
        Return actions for the given batch of states.

        ``agent_states`` does not participate in the computation; it is only
        kept the same length as ``states`` as the ptan API requires.

        Bugs fixed: the original referenced the module-level global ``sac``
        instead of ``self.sac``, and called ``.cpu().numpy()`` on the plain
        ``int`` returned by ``SAC.get_action`` (which also only supports a
        batch of one).  The whole batch is now sampled in one
        ``Categorical`` draw.
        """
        if agent_states is None:
            agent_states = [None] * len(states)
        # Apply the preprocessor (e.g. conversion to float32 tensors) if set.
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)
        probs_v = self.sac.act_net(states)
        dist = torch.distributions.Categorical(probs=probs_v)
        actions = dist.sample().cpu().numpy()
        return actions, agent_states


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # BooleanOptionalAction keeps the historical default (CUDA on) while
    # making the flag actually usable: --cuda / --no-cuda.  The original
    # combined action='store_true' with default=True, so CUDA could never
    # be switched off from the command line.
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction, help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    save_path = os.path.join("saves", "sac-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    env = wrap_dqn(gym.make(ENV_ID, obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    test_env = wrap_dqn(gym.make(ENV_ID, obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)

    # Build the actor and the twin critics.
    sac = SAC(env, device=device, actor_lr=3e-4, critic_lr=3e-4, alpha_lr=3e-4, target_entropy=-0.5 * env.action_space.n, tau=0.99, gamma=0.9)
    print(sac.act_net)
    print(sac.crt1_net)
    print(sac.crt2_net)

    frame_idx = 0
    agent = ptan.agent.PolicyAgent(sac.act_net, device=device)
    # agent = PolicyAgent(sac, device=device)
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Resume training from the newest "epoch" checkpoint in save_path.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        sac.act_net.load_state_dict(checkpoint["act_net"])
        sac.crt1_net.load_state_dict(checkpoint["crt1_net"])
        sac.crt2_net.load_state_dict(checkpoint["crt2_net"])
        sac.target_crt1_net.target_model.load_state_dict(checkpoint["target_crt1_net"])
        sac.target_crt2_net.target_model.load_state_dict(checkpoint["target_crt2_net"])
        sac.act_opt.load_state_dict(checkpoint["act_opt"])
        sac.crt1_opt.load_state_dict(checkpoint["crt1_opt"])
        sac.crt2_opt.load_state_dict(checkpoint["crt2_opt"])
        sac.log_alpha = checkpoint["log_alpha"]
        sac.log_alpha_optimizer.load_state_dict(checkpoint["log_alpha_opt"])
        # Bug fixed: "step" is never written by the save code below, so the
        # unconditional read crashed every resume with a KeyError.
        if "step" in checkpoint:
            agent.step = checkpoint["step"]
        frame_idx = checkpoint["frame_idx"]
        print("加载模型成功")

    writer = SummaryWriter(comment="-sac_" + args.name)
    # The experience source emits n-step transitions: ``reward`` is already
    # the discounted n-step return and ``last_state`` is n steps ahead, so
    # the Bellman bootstrap below must be discounted by GAMMA ** n.
    unroll_steps = 4
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA, steps_count=unroll_steps)
    buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=REPLAY_SIZE)

    best_reward = None
    with ptan.common.utils.RewardTracker(writer) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
            while True:
                frame_idx += 1
                # Collect one environment step into the replay buffer.
                buffer.populate(1)
                # Log any episodes that finished since the last iteration.
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", steps[0], frame_idx)
                    tracker.reward(rewards[0], frame_idx)

                # Wait until the buffer holds enough transitions to train on.
                if len(buffer) < REPLAY_INITIAL:
                    continue

                batch = buffer.sample(BATCH_SIZE)
                states_v, actions_v, rewards_v, dones_mask, last_states_v = common.unpack_batch_sac_q(batch, device)

                with torch.no_grad():
                    # Soft value of the next state under the current policy:
                    #   V(s') = E_{a~pi}[ minQ(s',a) - alpha * log pi(a|s') ]
                    pred_probs = sac.act_net(last_states_v)
                    pred_log_probs = torch.log(pred_probs + 1e-8)
                    q1_v = sac.target_crt1_net.target_model(last_states_v)
                    q2_v = sac.target_crt2_net.target_model(last_states_v)
                    min_q_next = torch.min(q1_v, q2_v)  # [batch, n_actions]
                    target_v = torch.sum(pred_probs * (min_q_next - sac.log_alpha.exp() * pred_log_probs), dim=1, keepdim=True)
                    # Bug fixed: the original added alpha * entropy on top of
                    # target_v, but target_v already equals E[minQ] + alpha*H,
                    # so the entropy bonus was counted twice.
                    # Terminal states have no future value.
                    target_v[dones_mask.bool()] = 0.0
                    # Bug fixed: rewards_v is an n-step discounted return, so
                    # the bootstrap is discounted by GAMMA ** unroll_steps
                    # (the original used a single GAMMA for 4-step samples).
                    pred_q_v = rewards_v.unsqueeze(dim=-1) + target_v * (GAMMA ** unroll_steps)

                # train critic 1: regress Q(s, a_taken) onto the Bellman target
                sac.crt1_opt.zero_grad()
                q1_v = sac.crt1_net(states_v)
                q1_selected = q1_v.gather(1, actions_v.long().unsqueeze(-1)).squeeze(-1)
                critic_1_loss_v = F.mse_loss(q1_selected.unsqueeze(-1), pred_q_v.detach())
                critic_1_loss_v.backward()
                sac.crt1_opt.step()
                tb_tracker.track("loss_critic_1", critic_1_loss_v, frame_idx)

                # train critic 2 on the same target
                sac.crt2_opt.zero_grad()
                q2_v = sac.crt2_net(states_v)
                q2_selected = q2_v.gather(1, actions_v.long().unsqueeze(-1)).squeeze(-1)
                critic_2_loss_v = F.mse_loss(q2_selected.unsqueeze(-1), pred_q_v.detach())
                critic_2_loss_v.backward()
                sac.crt2_opt.step()
                tb_tracker.track("loss_critic_2", critic_2_loss_v, frame_idx)
                tb_tracker.track("critic_ref", pred_q_v.mean(), frame_idx)

                # train actor: minimise E_pi[ alpha * log pi - minQ ]
                sac.act_opt.zero_grad()
                probs = sac.act_net(states_v)
                log_probs = torch.log(probs + 1e-8)
                entropy = -torch.sum(probs * log_probs, dim=1, keepdim=True)

                q1_current = sac.crt1_net(states_v)  # [batch, n_actions]
                q2_current = sac.crt2_net(states_v)  # [batch, n_actions]
                min_q_current = torch.min(q1_current, q2_current)  # [batch, n_actions]
                actor_loss_v = torch.mean(torch.sum(probs * (sac.log_alpha.exp() * log_probs - min_q_current), dim=1))
                actor_loss_v.backward()
                sac.act_opt.step()
                tb_tracker.track("loss_actor", actor_loss_v, frame_idx)

                # train alpha (temperature): push entropy towards target_entropy
                sac.log_alpha_optimizer.zero_grad()
                alpha_loss_v = torch.mean((entropy - sac.target_entropy).detach() * sac.log_alpha.exp())
                alpha_loss_v.backward()
                sac.log_alpha_optimizer.step()
                tb_tracker.track("alpha_loss", alpha_loss_v, frame_idx)
                tb_tracker.track("alpha_value", sac.log_alpha.exp(), frame_idx)

                # Polyak-update the target critics.
                sac.soft_update()

                if frame_idx % TEST_ITERS == 0:
                    # Periodically evaluate the greedy policy and checkpoint.
                    ts = time.time()
                    rewards, steps = test_net(sac.act_net, test_env, device=device)
                    print("Test done in %.2f sec, reward %.3f, steps %d" % (
                        time.time() - ts, rewards, steps))
                    writer.add_scalar("test_reward", rewards, frame_idx)
                    writer.add_scalar("test_steps", steps, frame_idx)
                    checkpoint = {
                        "act_net": sac.act_net.state_dict(),
                        "crt1_net": sac.crt1_net.state_dict(),
                        "crt2_net": sac.crt2_net.state_dict(),
                        "frame_idx": frame_idx,
                        "best_reward": best_reward,
                        "target_crt1_net": sac.target_crt1_net.target_model.state_dict(),
                        "target_crt2_net": sac.target_crt2_net.target_model.state_dict(),
                        "act_opt": sac.act_opt.state_dict(),
                        "crt1_opt": sac.crt1_opt.state_dict(),
                        "crt2_opt": sac.crt2_opt.state_dict(),
                        "log_alpha": sac.log_alpha,
                        "log_alpha_opt": sac.log_alpha_optimizer.state_dict(),
                    }
                    common.save_checkpoints(frame_idx, checkpoint, save_path, "sac", keep_last=5)
                    if best_reward is None or best_reward < rewards:
                        if best_reward is not None:
                            print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                        best_reward = rewards
                    common.save_best_model(rewards, checkpoint, save_path, f"sac-best-{frame_idx}", keep_best=10)
