#!/usr/bin/env python3
'''
Adaptation complete, pending verification.
References:
1. https://github.com/lutery/slac.pytorch.git
2. https://github.com/alexlee-gk/slac
3. https://alexlee-gk.github.io/slac/
Since the policy acts on the latent representation, it should be transferable to discrete actions.
TODO
1. Try migrating to discrete action spaces


Training log:
'''
import os
import sys
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
import yaml
import pathlib

from lib import model_slac as model, common_slac as common

import torch


class Trainer:
    def __init__(self, params, device):

        self.params = params
        self.num_steps = params['num_steps']
        self.domain_name = params['domain_name']
        self.action_repeat = params['action_repeat']
        self.device = device
        self.gamma = 0.99 # 折扣因子
        self.batch_size_sac = 256 # SAC的批量大小
        self.batch_size_latent = 32 # Latent的批量大小
        self.buffer_size = 10 ** 5 # 缓冲区大小
        self.num_sequences = 8 # 序列长度
        self.lr_sac = 3e-4 # SAC的学习率
        self.lr_latent = 1e-4 # Latent的学习率
        self.feature_dim = 256 # 特征维度
        self.z1_dim = 32 # z1的维度 todo
        self.z2_dim = 256 # z2的维度 todo
        self.hidden_units = (256, 256) # 隐藏层单元数 todo
        self.tau = 5e-3 # 软更新的参数
        self.initial_collection_steps = 10 ** 4 # 初始化缓冲区的训练步数
        self.initial_learning_steps = 10 ** 5 # 初始化潜在空间的训练步数
        self.eval_interval=500 # todo
        self.save_interval=100 # todo
        self.num_eval_episodes = 5 # todo 
        self.learning_steps_sac = 0
        self.learning_steps_latent = 0
        
        self.save_path = os.path.join("saves", "slac-" + self.params['name'])
        os.makedirs(self.save_path, exist_ok=True)

        self.frame_idx = 0
        self.train_count = 0
        self.best_reward = float('-inf')

        self.build_env()
        self.build_model()
        self.build_buffer()

    
    def build_buffer(self):
            
        self.writer = SummaryWriter(comment="-sac_slac_" + self.params['name'])
        # 构建DDPG代理
        self.env_sample_agent = common.EnvSampleAgent(env=self.test_env, device=self.device)
        self.net_sample_agent = common.SlacRandomAgent(net=lambda obs: self.sample_action(obs), obs_shape=self.obs_shape, action_shape=self.action_shape, num_sequences=self.num_sequences, device=self.device)
        self.exp_source = ptan.experience.ExperienceSourceRAW(self.env, self.env_sample_agent if self.frame_idx < self.initial_collection_steps else self.net_sample_agent, steps_count=1)
        self.buffer = ptan.experience.ExperienceReplayChunkBuffer(self.exp_source, buffer_size=self.buffer_size)


    def preprocess(self, ob):
        '''
        对环境进行预处理

        ob.state 不是tensor，应该是numpy
        由于ob时SlacObservation，所以.state拿到的是一个序列状态
        '''
        # 这里也进行了一次归一化，因为这里传入的不是缓冲区的数据
        state = torch.tensor(ob.state, dtype=torch.uint8, device=self.device).float().div_(255.0)
        with torch.no_grad():
            # 对环境进行特征提取
            feature = self.latent.encoder(state).view(1, -1)
        # 拿到每个观察对应的动作
        action = torch.tensor(ob.action, dtype=torch.float, device=self.device)
        # 观察特征，动作 todo 增加shape查看
        feature_action = torch.cat([feature, action], dim=1)
        return feature_action


    def sample_action(self, obs):
        '''
        动作随机采集
        '''
        '''
        param ob: SlacObservation
        根据环境观察得到动作
        '''
        # 预处理，得到（观察特征_动作） todo 这个动作是什么动作
        feature_action = self.preprocess(obs)
        with torch.no_grad():
            # 根据特征得到预测的动作（添加了噪声）
            action = self.actor.sample(feature_action)[0]
        return action.cpu().numpy()[0]



    def build_env(self):
        # todo dreamerv1可以不使用多帧堆叠，尝试一下
        self.env = common.wrap_dqn(gym.make(self.domain_name, render_mode="rgb_array"), stack_frames=1, action_repeat=self.action_repeat)
        self.test_env = common.wrap_dqn(gym.make(self.domain_name, render_mode="rgb_array"), stack_frames=1, action_repeat=self.action_repeat)
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.shape
        # Observations for training and evaluation.
        # t这两个观察是什么作用？用于采集数据和评估模型时传入的ob
        self.ob_test = common.SlacObservation(self.env.observation_space.shape, self.env.action_space.shape, self.num_sequences)


    def build_model(self):
        # 动作策略网络
        self.actor = model.GaussianPolicy(self.action_shape, self.num_sequences, self.feature_dim, self.hidden_units).to(self.device)
        # 这里应该是sac的评价网络模型
        self.critic = model.TwinnedQNetwork(self.action_shape, self.z1_dim, self.z2_dim, self.hidden_units).to(self.device)
        self.critic_target = model.TwinnedQNetwork(self.action_shape, self.z1_dim, self.z2_dim, self.hidden_units).to(self.device)
        # 这里是潜在模型，应该就是slac的特有，环境特征的采集和解码应该就是这里进行吧 todo
        self.latent = model.LatentModel(self.obs_shape, self.action_shape, self.feature_dim, self.z1_dim, self.z2_dim, self.hidden_units).to(self.device)
        # 将self.critic的参数完全拷贝到self.critic_target
        common.soft_update(self.critic_target, self.critic, 1.0)
        # 设置目标网络的参数不需要更新 梯度设置为false
        common.grad_false(self.critic_target)

        self.target_entropy = -float(self.action_shape[0])
        # We optimize log(alpha) because alpha is always bigger than 0. 
        # todo 作用，设置为0.0
        self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
        with torch.no_grad():
            # 这里是干嘛todo
            self.alpha = self.log_alpha.exp()

        # Optimizers. 创建优化器
        self.optim_actor = torch.optim.Adam(self.actor.parameters(), lr=self.lr_sac)
        self.optim_critic = torch.optim.Adam(self.critic.parameters(), lr=self.lr_sac)
        self.optim_alpha = torch.optim.Adam([self.log_alpha], lr=self.lr_sac)
        self.optim_latent = torch.optim.Adam(self.latent.parameters(), lr=self.lr_latent)

                # JIT compile to speed up. todo 作用
        fake_feature = torch.empty(1, self.num_sequences + 1, self.feature_dim, device=self.device)
        fake_action = torch.empty(1, self.num_sequences, self.action_shape[0], device=self.device)
        # 通过跟踪模型的前向计算路径来生成 TorchScript，具体作用是？todo
        self.create_feature_actions = torch.jit.trace(common.create_feature_actions, (fake_feature, fake_action))



    def load_trainer(self):
        # 增加加载模型的代码
        # todo 考虑保存buffer的状态
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # 增加加载模型的代码
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                self.actor.load_state_dict(checkpoint["actor_model"])
                self.critic.load_state_dict(checkpoint["critic_model"])
                self.critic_target.load_state_dict(checkpoint["critic_target_model"])
                self.latent.load_state_dict(checkpoint["latent_model"])
                self.optim_actor.load_state_dict(checkpoint["actor_optimizer"])
                self.optim_critic.load_state_dict(checkpoint["critic_optimizer"])
                self.optim_alpha.load_state_dict(checkpoint["log_alpha_optimizer"])
                self.optim_latent.load_state_dict(checkpoint["latent_optimizer"])
                self.log_alpha = checkpoint["log_alpha"]
                self.alpha = checkpoint["alpha"]
                self.target_entropy = checkpoint["target_entropy"]
                self.frame_idx = checkpoint["frame_idx"]
                self.train_count = checkpoint["train_count"]
                self.learning_steps_sac = checkpoint["learning_steps_sac"]
                self.learning_steps_latent = checkpoint["learning_steps_latent"]

            print("加载模型成功")


    def save_trainer(self):
        checkpoints = {
            "actor_model": self.actor.state_dict(),
            "critic_model": self.critic.state_dict(),
            "critic_target_model": self.critic_target.state_dict(),
            "latent_model": self.latent.state_dict(),
            "actor_optimizer": self.optim_actor.state_dict(),
            "critic_optimizer": self.optim_critic.state_dict(),
            "log_alpha_optimizer": self.optim_alpha.state_dict(),
            "latent_optimizer": self.optim_latent.state_dict(),
            "log_alpha": self.log_alpha,
            "alpha": self.alpha,
            "target_entropy": self.target_entropy,
            "frame_idx": self.frame_idx,
            "train_count": self.train_count,
            "learning_steps_sac": self.learning_steps_sac,
            "learning_steps_latent": self.learning_steps_latent,
        }

        common.save_checkpoints(self.train_count, checkpoints, self.save_path, 'sac_slac')


    def collect_seed_episodes(self, tb_tracker, tracker):
        print("开始预热缓冲区")
        while len(self.buffer) < self.initial_collection_steps:
            self.frame_idx += 1
            self.buffer.populate(1)
            rewards_steps = self.exp_source.pop_rewards_steps()
            if rewards_steps:
                # 记录当前的训练进度并判断是否达到了奖励目标
                rewards, steps = zip(*rewards_steps)
                tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                tracker.reward(rewards[0], self.frame_idx)

        print("预热缓冲区完成")


    def update_latent(self):
        '''
        更新潜在空间模型
        用于模拟环境的潜在动态、动作解码器、奖励解码器
        '''
        self.learning_steps_latent += 1
        batch = self.buffer.sample(self.batch_size_latent, self.num_sequences + 1)
        state_, action_, reward_, done_ = common.unpack_latent_batch(batch, self.batch_size_latent, self.num_sequences, self.obs_shape, self.action_shape, self.device)
        loss_kld, loss_image, loss_reward = self.latent.calculate_loss(state_, action_, reward_, done_)

        self.optim_latent.zero_grad()
        (loss_kld + loss_image + loss_reward).backward()
        self.optim_latent.step()

        if self.learning_steps_latent % 1000 == 0:
            # 这里是记录训练的损失
            self.writer.add_scalar("loss/kld", loss_kld.item(), self.learning_steps_latent)
            self.writer.add_scalar("loss/reward", loss_reward.item(), self.learning_steps_latent)
            self.writer.add_scalar("loss/image", loss_image.item(), self.learning_steps_latent)


    def update_sac(self):
        self.learning_steps_sac += 1
        # state_ shape (batch_size, num_sequences + 1, *state_shape) t
        # action_ shape (batch_size, num_sequences, *action_shape)
        # reward_ shape (batch_size, 1)
        # done_ shape (batch_size, 1)

        batch = self.buffer.sample(self.batch_size_sac, self.num_sequences + 1)
        state_, action_, reward_, done_ = common.unpack_sac_batch(batch, self.batch_size_sac, self.num_sequences, self.obs_shape, self.action_shape, self.device)
        z, next_z, action, feature_action, next_feature_action = self.prepare_batch(state_, action_)

        self.update_critic(z, next_z, action, next_feature_action, reward_, done_)
        self.update_actor(z, feature_action)
        # 将训练后的参数更新到目标网络
        common.soft_update(self.critic_target, self.critic, self.tau)


    def update_actor(self, z, feature_action):
        '''
        param z: 当前时刻的潜在空间特征
        param feature_action: 当前时刻的特征-动作对
        '''
        # 根据当前时刻的潜在空间特征和特征-动作对，得到当前时刻的动作
        action, log_pi = self.actor.sample(feature_action)
        # 预测当前时刻的Q值
        q1, q2 = self.critic(z, action)
        # 利用最小化负q值来寻找使得q值更大的动作
        # 同时log_pi是动作的对数概率（负的熵），它越小
        # -log_pi.detach().mean()时，这实际上是在计算策略的熵。越大的熵意味着策略越随机
        # 这里的loss_actor是最小化q值和最大化熵的组合
        # 看定义时，这里self.alpha没有计算梯度，所以这里不会去更新self.alpha的梯度
        # 这里主要是更新动作策略网络
        loss_actor = -torch.mean(torch.min(q1, q2) - self.alpha * log_pi)

        self.optim_actor.zero_grad()
        loss_actor.backward(retain_graph=False)
        self.optim_actor.step()

        with torch.no_grad():
            # 这里计算的熵就是上面的- self.alpha * log_pi只不过mean在外面
            # entropy = -log_pi.detach().mean() 计算策略的当前熵值
            entropy = -log_pi.detach().mean()
        # 计算温度参数的损失
        # 这里不是算熵的损失，而是算温度参数的损失
        # 这个损失函数的设计目的是自动调节 alpha，使策略的熵值接近目标熵值。
        # self.alpha就是从self.log_alpha中计算出来的
        '''
        当 entropy < target_entropy（策略过于确定性）时：

        target_entropy - entropy > 0
        导致 loss_alpha < 0（因为有负号）
        最小化 loss_alpha 会使 log_alpha 增加，从而使 alpha 增加
        更高的 alpha 会在策略更新时更强调熵最大化，增加策略的随机性
        当 entropy > target_entropy（策略过于随机）时：

        target_entropy - entropy < 0
        导致 loss_alpha > 0
        最小化 loss_alpha 会使 log_alpha 减少，从而使 alpha 减少
        较低的 alpha 会减弱熵的影响，让策略更专注于 Q 值最大化

        entropy 与 target_entropy 靠近的影响
        当 entropy 接近 target_entropy 时：

        平衡探索和利用：

        策略既不会过于确定性（可能陷入局部最优）
        也不会过于随机（无法有效利用已学知识）
        自适应调节：

        alpha 会自动稳定在一个适当的值
        在训练初期，alpha 通常较大，促进探索
        随着学习进行，alpha 往往会减小，更加专注于利用
        任务适应性：

        不同任务所需的最优熵水平是不同的
        自动调节使 SAC 能够适应各种不同复杂度的任务
        '''
        loss_alpha = -self.log_alpha * (self.target_entropy - entropy)

        self.optim_alpha.zero_grad()
        loss_alpha.backward(retain_graph=False)
        self.optim_alpha.step()
        with torch.no_grad():
            self.alpha = self.log_alpha.exp()

        if self.learning_steps_sac % 1000 == 0:
            self.writer.add_scalar("loss/actor", loss_actor.item(), self.learning_steps_sac)
            self.writer.add_scalar("loss/alpha", loss_alpha.item(), self.learning_steps_sac)
            self.writer.add_scalar("stats/alpha", self.alpha.item(), self.learning_steps_sac)
            self.writer.add_scalar("stats/entropy", entropy.item(), self.learning_steps_sac)


    def prepare_batch(self, state_, action_):
        '''
        预处理sac训练的数据
        # state_ shape (batch_size, num_sequences + 1, *state_shape) t
        # action_ shape (batch_size, num_sequences, *action_shape)

        prepare_batch 是 SLAC（Stochastic Latent Actor-Critic）算法中的关键函数，用于将原始观察和动作序列转换为适合 SAC（Soft Actor-Critic）算法训练的格式。它处理时序数据并提取潜在表示，构建强化学习所需的状态-动作对
        '''
        with torch.no_grad():
            # f(1:t+1) 对观察进行特征提取
            # feature shape (batch_size, num_sequences + 1, feature_dim)
            feature_ = self.latent.encoder(state_)
            # z(1:t+1) 对观察进行潜在空间采样，并cat先验特征和后验特征
            # 形状为 (batch_size, num_sequences + 1, z1_dim + z2_dim)
            z_ = torch.cat(self.latent.sample_posterior(feature_, action_)[2:4], dim=-1)

        # z(t), z(t+1) 倒数第二个和最后一个时间步的潜在空间特征
        # 这两个状态对是强化学习中的关键元素，用于计算当前状态的Q值和下一状态的目标Q值 todo 查看shape
        z, next_z = z_[:, -2], z_[:, -1]
        # a(t) 最后一个时间步的动作 todo
        action = action_[:, -1]
        # fa(t)=(x(1:t), a(1:t-1)), fa(t+1)=(x(2:t+1), a(2:t)) 这里就是在拼接当前状态和上一个动作的特征 创建特征-动作对
        # feature_action: 用于策略网络输入，包含时间 1:t 的观察特征和时间 1:t-1 的动作
        # next_feature_action: 用于计算下一时刻的动作，包含时间 2:t+1 的观察特征和时间 2:t 的动作
        feature_action, next_feature_action = self.create_feature_actions(feature_, action_)

        # todo 补充shape
        return z, next_z, action, feature_action, next_feature_action


    def update_critic(self, z, next_z, action, next_feature_action, reward, done):
        '''
        param z: 当前时刻的潜在空间特征
        param next_z: 下一个时刻的潜在空间特征
        param action: 当前时刻的动作
        param next_feature_action: 下一个时刻的特征-动作对
        param reward: 当前时刻的奖励
        param done: 当前时刻的结束标识
        '''
        curr_q1, curr_q2 = self.critic(z, action)
        with torch.no_grad():
            next_action, log_pi = self.actor.sample(next_feature_action)
            # 下一个时刻的潜在状态特征和动作预测下一个状体的Q值
            next_q1, next_q2 = self.critic_target(next_z, next_action)
            # 使用sac的q值来计算目标q值
            # self.alpha * log_pi 是熵正则化项
            # log_pi 是动作的对数概率（负的熵）
            # self.alpha 是温度参数，控制探索与利用的平衡
            # 减去这一项意味着鼓励策略具有高熵（更多样化的行为）
            # 这是SAC的核心公式之一，实现了"最大熵强化学习"原则 todo 查看sac的其他的代码
            # 传统Q值仅考虑奖励，而SAC的目标函数加入了最大化策略熵的项
            # 最终目标是找到既能获得高奖励又能保持高探索性的策略
            next_q = torch.min(next_q1, next_q2) - self.alpha * log_pi
        # 计算目标q值
        target_q = reward + (1.0 - done) * self.gamma * next_q
        # 计算当前q值和目标q值的损失
        loss_critic = (curr_q1 - target_q).pow_(2).mean() + (curr_q2 - target_q).pow_(2).mean()

        self.optim_critic.zero_grad()
        loss_critic.backward(retain_graph=False)
        self.optim_critic.step()

        if self.learning_steps_sac % 1000 == 0:
            self.writer.add_scalar("loss/critic", loss_critic.item(), self.learning_steps_sac)


    def train(self):
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
                self.collect_seed_episodes(tb_tracker, tracker)
                self.exp_source.agent = self.net_sample_agent

                # todo 这里可以考虑每次重新训练都训练
                if self.train_count < self.initial_learning_steps:
                    bar = tqdm(range(self.initial_learning_steps))
                    for _ in bar:
                        bar.set_description("Updating latent variable model.")
                        self.update_latent()
                        self.train_count += 1


                while True:
                    self.frame_idx += 1
                    self.buffer.populate(1)
                    rewards_steps = self.exp_source.pop_rewards_steps()
                    if rewards_steps:
                        # 记录当前的训练进度并判断是否达到了奖励目标
                        rewards, steps = zip(*rewards_steps)
                        tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                        tracker.reward(rewards[0], self.frame_idx)

                    self.__train_trainer()

                    if self.train_count % self.eval_interval == 0:
                        self.__test_trainer()

                    if self.train_count % self.save_interval == 0:
                        self.save_trainer()


    def __train_trainer(self):
        self.update_latent()
        self.update_sac()

        self.train_count += 1 
    

    @torch.no_grad()
    def __test_net(self, count, device='cpu'):
        '''
        count: 执行游戏的次数（每次都是执行到游戏结束）

        return: （平均奖励，平均步数）
        '''
        rewards = 0.0
        steps = 0
        for _ in range(count):
            obs, _ = self.test_env.reset()
            self.ob_test.reset_episode(state=obs)
            while True:                
                feature_action = self.preprocess(self.ob_test)
                with torch.no_grad():
                    action = self.actor(feature_action).cpu().numpy()[0]
            
                # 然后执行动作得到下一个
                obs, reward, done, truncated, _ = self.test_env.step(action)
                self.ob_test.append(state=obs, action=action)
                rewards += reward
                steps += 1
                if done or truncated:
                    break
        return rewards / count, steps / count


    @torch.no_grad()
    def __test_trainer(self):
         # 测试并保存最好测试结果的庶数据
        ts = time.time()
        self.actor.eval()
        rewards, steps = self.__test_net(count=10, device=self.device)
        self.actor.train()
        print("Train Count %d, Test done in %.2f sec, reward %.3f, steps %d" % (self.train_count,
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.frame_idx)
        self.writer.add_scalar("test_steps", steps, self.frame_idx)
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards
        
        checkpoints = {
            "actor_model": self.actor.state_dict(),
        }
        common.save_best_model(rewards, checkpoints, self.save_path, 'slac_best')

        


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Fix: the original used action='store_true' with default=True, which made
    # --cuda a no-op and CUDA impossible to disable. BooleanOptionalAction keeps
    # the default (CUDA on) and --cuda valid, and adds --no-cuda to turn it off.
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction,
                        help='Enable CUDA (disable with --no-cuda)')
    parser.add_argument("-n", "--name", default="invertPendulum", help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load defaults from the YAML config next to this script, merging the
    # sections named by --configs in order.
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/pendulum_configs_slac.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Expose every config key as a CLI flag whose default is the YAML value.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    # Fix: re-parse the FULL command line now that all flags are registered.
    # The original parsed only `remaining`, which no longer contained the
    # --cuda/--name/--configs values consumed by parse_known_args above, so
    # user-supplied values for those flags were silently reset to defaults.
    args = parser.parse_args()
    params = vars(args)


    trainer = Trainer(params, device)
    trainer.load_trainer()
    trainer.train()
