#!/usr/bin/env python3
'''
完成适配，训练通过，待验证
参考链接：
1. https://github.com/lutery/pytorch_sac_ae.git
2. ae也是一种特征提取增强器，也可以结合其他强化学习算法，比如rainbow\a2c\ddpg todo 后续的方向


训练记录：
20250430: 测试分数95.14，训练分数94.731（正在不断的上升中），训练通过，待验证
'''
import os
import sys
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
import yaml
import pathlib

from lib import model_ae as model, common_ae as common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

gym.register_envs(ale_py)


class Trainer:
    """SAC+AE trainer: Soft Actor-Critic augmented with a pixel auto-encoder.

    The critic owns the convolutional encoder and the actor shares (ties) its
    conv weights. A pixel decoder reconstructs observations from the critic
    encoder's latent vector; the reconstruction loss plus an L2 penalty on the
    latent regularizes the representation and provides an auxiliary training
    signal (see https://arxiv.org/pdf/1910.01741.pdf and the reference
    implementation https://github.com/lutery/pytorch_sac_ae.git).
    """

    def __init__(self, params, device):
        """
        param params: hyper-parameter dict (merged YAML config + CLI overrides)
        param device: torch device used for every model and training batch
        """
        self.params = params
        self.replay_buffer_capacity = params['replay_buffer_capacity']
        self.hidden_dim = params['hidden_dim']
        self.encoder_feature_dim = params['encoder_feature_dim']
        self.actor_log_std_max = params['actor_log_std_max']
        self.num_layers = params['num_layers']
        self.num_filters = params['num_filters']
        self.encoder_type = params['encoder_type']
        self.init_temperature = params['init_temperature']
        self.actor_lr = params['actor_lr']
        self.actor_beta = params['actor_beta']
        self.critic_lr = params['critic_lr']
        self.critic_beta = params['critic_beta']
        self.alpha_lr = params['alpha_lr']
        self.alpha_beta = params['alpha_beta']
        self.curl_latent_dim = params['curl_latent_dim']
        self.encoder_lr = params['encoder_lr']
        self.replay_initial = params['replay_initial']
        self.batch_size = params['batch_size']
        self.eval_freq = params['eval_freq']
        self.save_freq = params['save_freq']
        self.image_size = params['image_size']
        self.detach_encoder = params['detach_encoder']
        self.discount = params['discount']
        self.actor_update_freq = params['actor_update_freq']
        self.critic_target_update_freq = params['critic_target_update_freq']
        self.cpc_update_freq = params['cpc_update_freq']
        self.log_interval = params['log_interval']
        self.critic_tau = params['critic_tau']
        self.encoder_tau = params['encoder_tau']
        self.gamma = params['gamma']
        self.decoder_type = params['decoder_type']
        self.decoder_lr = params['decoder_lr']
        self.decoder_weight_lambda = params['decoder_weight_lambda']
        self.decoder_update_freq = params['decoder_update_freq']
        self.decoder_latent_lambda = params['decoder_latent_lambda']

        self.device = device

        self.save_path = os.path.join("saves", "sac-ae-" + self.params['name'])
        os.makedirs(self.save_path, exist_ok=True)

        self.frame_idx = 0    # environment steps collected so far
        self.train_count = 0  # optimization steps performed so far
        self.best_reward = float('-inf')  # best evaluation reward seen

        self.build_env()
        self.build_model()
        self.build_buffer()

    def build_buffer(self):
        """Create the TensorBoard writer, collection agents and replay buffer.

        Until ``replay_initial`` frames have been collected the experience
        source uses the environment-sampling agent (random exploration);
        afterwards ``train`` swaps in the policy-driven agent by reassigning
        ``self.exp_source.agent``.
        """
        self.writer = SummaryWriter(comment="-sac_curl_" + self.params['name'])
        # agents used to collect experience into the replay buffer
        self.env_sample_agent = common.EnvSampleAgent(env=self.test_env, device=self.device)
        self.net_sample_agent = common.ContinuousRandomAgent(net=lambda obs: self.sample_action(obs), device=self.device)
        self.exp_source = ptan.experience.ExperienceSourceFirstLast(self.env, self.env_sample_agent if self.frame_idx < self.replay_initial else self.net_sample_agent, gamma=self.gamma, steps_count=1)
        self.buffer = ptan.experience.ExperienceReplayBuffer(self.exp_source, buffer_size=self.replay_buffer_capacity)

    def sample_action(self, obs):
        """Sample a stochastic action from the current policy (no gradients).

        The actor returns ``(mu, pi, log_pi, log_std)``; only the sampled
        action ``pi`` is needed here, so log-prob computation is skipped.
        """
        with torch.no_grad():
            _, pi, _, _ = self.actor(obs, compute_log_pi=False)
            return pi

    def build_env(self):
        """Build the (wrapped) training and evaluation environments."""
        # TODO dreamer-v1 works without frame stacking; worth trying here too
        self.env = common.wrap_dqn(gym.make('MountainCarContinuous-v0', render_mode="rgb_array"))
        self.test_env = common.wrap_dqn(gym.make('MountainCarContinuous-v0', render_mode="rgb_array"))
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.shape

    def build_model(self):
        """Build actor, critic (+target), decoder, temperature and optimizers."""
        self.actor = model.Actor(
            obs_shape=self.obs_shape,
            action_shape=self.action_shape,
            hidden_dim=self.hidden_dim,
            encoder_type=self.encoder_type,
            encoder_feature_dim=self.encoder_feature_dim,
            log_std_min=self.params['actor_log_std_min'],
            log_std_max=self.actor_log_std_max,
            num_layers=self.num_layers,
            num_filters=self.num_filters
        ).to(self.device)

        self.critic = model.Critic(
            obs_shape=self.obs_shape,
            action_shape=self.action_shape,
            hidden_dim=self.hidden_dim,
            encoder_type=self.encoder_type,
            encoder_feature_dim=self.encoder_feature_dim,
            num_layers=self.num_layers,
            num_filters=self.num_filters
        ).to(self.device)

        self.critic_target = ptan.agent.TargetNet(self.critic)
        # tie conv weights: the actor reuses the critic encoder's conv layers,
        # so encoder gradients flow only through the critic / AE losses
        self.actor.encoder.copy_conv_weights_from(self.critic.encoder)

        # learnable temperature alpha, optimized in log space so it stays > 0
        self.log_alpha = torch.tensor(np.log(self.init_temperature), dtype=torch.float32).to(self.device)
        self.log_alpha.requires_grad = True
        # set target entropy to -|A|
        self.target_entropy = -np.prod(self.action_shape)

        # optimizers (one per trainable component)
        self.actor_optimizer = torch.optim.Adam(
            self.actor.parameters(), lr=self.actor_lr, betas=(self.actor_beta, 0.999)
        )

        self.critic_optimizer = torch.optim.Adam(
            self.critic.parameters(), lr=self.critic_lr, betas=(self.critic_beta, 0.999)
        )

        self.log_alpha_optimizer = torch.optim.Adam(
            [self.log_alpha], lr=self.alpha_lr, betas=(self.alpha_beta, 0.999)
        )

        # decoder reconstructing pixels from the critic encoder's latent
        self.decoder = model.PixelDecoder(
            self.obs_shape,
            self.encoder_feature_dim,
            self.num_layers,
            self.num_filters
        ).to(self.device)
        self.decoder.apply(common.weight_init)

        # separate optimizer for the critic's encoder, driven only by the
        # reconstruction loss in __update_decoder
        self.encoder_optimizer = torch.optim.Adam(
            self.critic.encoder.parameters(), lr=self.encoder_lr
        )

        # decoder optimizer; weight_decay acts as the L2 weight penalty
        self.decoder_optimizer = torch.optim.Adam(
            self.decoder.parameters(),
            lr=self.decoder_lr,
            weight_decay=self.decoder_weight_lambda
        )

        self.switch_train()
        self.critic_target.target_model.train()

    def switch_train(self, training=True):
        """Toggle train/eval mode on all trainable networks at once."""
        self.training = training
        self.actor.train(training)
        self.critic.train(training)
        self.decoder.train(training)

    def load_trainer(self):
        """Restore the newest checkpoint from ``self.save_path``, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # checkpoint files are named ..._epoch_<train_count>.<ext>;
            # sort numerically by that trailing counter and load the newest
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                self.actor.load_state_dict(checkpoint['actor_model'])
                self.critic.load_state_dict(checkpoint['critic_model'])
                self.critic_target.target_model.load_state_dict(checkpoint['critic_target_model'])
                self.decoder.load_state_dict(checkpoint['decoder'])
                self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
                self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer'])
                self.log_alpha_optimizer.load_state_dict(checkpoint['log_alpha_optimizer'])
                self.encoder_optimizer.load_state_dict(checkpoint['encoder_optimizer'])
                self.decoder_optimizer.load_state_dict(checkpoint['decoder_optimizer'])
                self.log_alpha = checkpoint['log_alpha']
                self.target_entropy = checkpoint['target_entropy']

                self.frame_idx = checkpoint['frame_idx']
                self.train_count = checkpoint['train_count']

            # NOTE: the replay buffer is intentionally not persisted
            print("加载模型成功")

    def save_trainer(self):
        """Save all networks, optimizers and training counters as a checkpoint."""
        checkpoints = {
            "actor_model": self.actor.state_dict(),
            "critic_model": self.critic.state_dict(),
            "decoder": self.decoder.state_dict(),
            "critic_target_model": self.critic_target.target_model.state_dict(),
            "actor_optimizer": self.actor_optimizer.state_dict(),
            "critic_optimizer": self.critic_optimizer.state_dict(),
            "log_alpha_optimizer": self.log_alpha_optimizer.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "decoder_optimizer": self.decoder_optimizer.state_dict(),
            "log_alpha": self.log_alpha,
            "target_entropy": self.target_entropy,
            "frame_idx": self.frame_idx,
            "train_count": self.train_count
        }

        common.save_checkpoints(self.train_count, checkpoints, self.save_path, 'sac_curl')

    def collect_seed_episodes(self, tb_tracker, tracker):
        """Fill the replay buffer up to ``replay_initial`` frames with the
        exploration agent, logging episode rewards/lengths along the way."""
        while len(self.buffer) < self.replay_initial:
            self.frame_idx += 1
            self.buffer.populate(1)
            rewards_steps = self.exp_source.pop_rewards_steps()
            if rewards_steps:
                # an episode finished: record its length and total reward
                rewards, steps = zip(*rewards_steps)
                tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                tracker.reward(rewards[0], self.frame_idx)

    def train(self):
        """Main loop: collect one frame per iteration, then optimize.

        Runs forever; checkpoints every ``save_freq`` and evaluates every
        ``eval_freq`` optimization steps.
        """
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
                self.collect_seed_episodes(tb_tracker, tracker)
                # seed data collected: switch collection to the learned policy
                self.exp_source.agent = self.net_sample_agent

                while True:
                    self.frame_idx += 1
                    self.buffer.populate(1)
                    rewards_steps = self.exp_source.pop_rewards_steps()
                    if rewards_steps:
                        # an episode finished: record its length and total reward
                        rewards, steps = zip(*rewards_steps)
                        tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                        tracker.reward(rewards[0], self.frame_idx)

                    # On the first pass after warm-up, run replay_initial
                    # updates so the agent quickly exploits the seed data;
                    # afterwards do one update per collected frame.
                    num_train = self.replay_initial if self.frame_idx == (self.replay_initial + 1) else 1
                    for _ in range(num_train):
                        self.__train_trainer()

                    if self.train_count % self.eval_freq == 0:
                        self.__test_trainer()

                    if self.train_count % self.save_freq == 0:
                        self.save_trainer()

    @property
    def alpha(self):
        """Current entropy temperature (always positive: exp of log_alpha)."""
        return self.log_alpha.exp()

    def __train_critic(self, next_obs, obs, action, reward, not_done):
        """One critic update on a sampled batch.

        Builds the soft Bellman target from the target critic (min of the
        twin Q-values minus the entropy bonus) and regresses both online
        Q-heads to it with MSE.
        """
        with torch.no_grad():
            _, policy_action, log_pi, _ = self.actor(next_obs)
            # target critic evaluates the next state with the sampled action
            target_Q1, target_Q2 = self.critic_target(next_obs, policy_action)
            # clipped double-Q: take the smaller estimate, add entropy term
            target_V = torch.min(target_Q1,
                                 target_Q2) - self.alpha.detach() * log_pi
            # soft Bellman backup; not_done zeroes the bootstrap at terminals
            target_Q = reward + (not_done * self.discount * target_V)

        # current Q estimates; the encoder is NOT detached here, so the
        # critic loss also trains the shared conv encoder
        current_Q1, current_Q2 = self.critic(
            obs, action)
        # both heads regress to the same target
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)

        # optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        return critic_loss

    def __update_actor_and_alpha(self, obs):
        """One actor update plus one temperature (alpha) update.

        param obs: batch of observations sampled from the replay buffer
        """
        # detach encoder, so we don't update it with the actor loss
        _, pi, log_pi, log_std = self.actor(obs, detach_encoder=True)
        actor_Q1, actor_Q2 = self.critic(obs, pi, detach_encoder=True)

        # pessimistic Q estimate for the sampled action
        actor_Q = torch.min(actor_Q1, actor_Q2)
        # maximize Q while keeping the policy stochastic:
        # alpha * log_pi rewards entropy, -actor_Q rewards value
        actor_loss = (self.alpha.detach() * log_pi - actor_Q).mean()

        self.writer.add_scalar('train_actor_loss', actor_loss, self.frame_idx)
        self.writer.add_scalar('train_actor_target_entropy', self.target_entropy, self.frame_idx)
        # differential entropy of the diagonal Gaussian policy (pre-squash)
        entropy = 0.5 * log_std.shape[1] * \
            (1.0 + np.log(2 * np.pi)) + log_std.sum(dim=-1)
        self.writer.add_scalar('train_actor/entropy', entropy.mean(), self.frame_idx)

        # optimize the actor
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        self.log_alpha_optimizer.zero_grad()
        # temperature loss: pushes alpha up when the policy's entropy
        # (-log_pi) is below target_entropy and down when above; the
        # entropy-gap term is detached so only alpha receives gradients
        alpha_loss = (self.alpha *
                    (-log_pi - self.target_entropy).detach()).mean()
        self.writer.add_scalar('train_alpha_loss', alpha_loss, self.frame_idx)
        self.writer.add_scalar('train_alpha_value', self.alpha, self.frame_idx)
        alpha_loss.backward()
        self.log_alpha_optimizer.step()

    def __update_decoder(self, obs, target_obs, step):
        """One auto-encoder update (critic encoder + pixel decoder).

        param obs: observations fed through the encoder
        param target_obs: reconstruction target (the caller passes the same
            tensor as ``obs``)
        param step: unused; kept for signature compatibility with the
            reference implementation

        The reconstruction loss gives the encoder an auxiliary supervised
        signal, and the L2 penalty on the latent (lambda-weighted, see
        https://arxiv.org/pdf/1903.12436.pdf) keeps the representation smooth.
        """
        # encode the observation batch
        h = self.critic.encoder(obs)

        if target_obs.dim() == 4:
            # 4-D target means raw pixel input (batch, channels, H, W):
            # preprocess images to be in [-0.5, 0.5] range
            target_obs = common.preprocess_obs(target_obs)
        # decode the latent back to pixel space
        rec_obs = self.decoder(h)
        # reconstruction should match the (preprocessed) target observation
        rec_loss = F.mse_loss(target_obs, rec_obs)

        # add L2 penalty on latent representation
        # see https://arxiv.org/pdf/1903.12436.pdf
        latent_loss = (0.5 * h.pow(2).sum(1)).mean()

        # joint update of encoder and decoder
        loss = rec_loss + self.decoder_latent_lambda * latent_loss
        self.encoder_optimizer.zero_grad()
        self.decoder_optimizer.zero_grad()
        loss.backward()

        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        self.writer.add_scalar('train_ae_ae_loss', loss, self.frame_idx)

    def __train_trainer(self):
        """One full optimization step: critic always; actor, target nets and
        auto-encoder at their respective update frequencies."""
        batch = self.buffer.sample(self.batch_size)
        states_v, actions_v, rewards_v, dones_mask, not_done_mask, last_states_v = common.unpack_batch_sac_curl(batch, self.device)

        self.__train_critic(last_states_v, states_v, actions_v, rewards_v, not_done_mask)

        if self.frame_idx % self.actor_update_freq == 0:
            # delayed actor/alpha update
            self.__update_actor_and_alpha(states_v)

        if self.frame_idx % self.critic_target_update_freq == 0:
            # Polyak-average online critic into the target critic; the
            # encoder uses its own (typically faster) tau
            common.soft_update_params(
                self.critic.Q1, self.critic_target.target_model.Q1, self.critic_tau
            )
            common.soft_update_params(
                self.critic.Q2, self.critic_target.target_model.Q2, self.critic_tau
            )
            common.soft_update_params(
                self.critic.encoder, self.critic_target.target_model.encoder,
                self.encoder_tau
            )

        # NOTE: unlike CURL there is no contrastive anchor/positive pair here;
        # the same observations serve as both encoder input and AE target
        if self.frame_idx % self.decoder_update_freq == 0:
            self.__update_decoder(states_v, states_v, self.frame_idx)
        self.train_count += 1

    @torch.no_grad()
    def __test_net(self, count, device):
        """Play ``count`` full episodes with the deterministic policy.

        param count: number of evaluation episodes (each runs to termination)
        param device: torch device the observations are moved to

        return: (mean reward per episode, mean steps per episode)
        """
        rewards = 0.0
        steps = 0
        for _ in range(count):
            obs, _ = self.test_env.reset()
            while True:
                obs = torch.tensor(obs, dtype=torch.float32, device=device).unsqueeze(0)
                # deterministic action: skip sampling and log-prob computation
                action, _, _, _ = self.actor(obs, compute_pi=False, compute_log_pi=False)

                obs, reward, done, truncated, _ = self.test_env.step(action[0].cpu().numpy())
                rewards += reward
                steps += 1
                if done or truncated:
                    break
        return rewards / count, steps / count

    @torch.no_grad()
    def __test_trainer(self):
        """Evaluate the current policy, log results and save the best actor."""
        ts = time.time()
        self.actor.eval()
        rewards, steps = self.__test_net(count=10, device=self.device)
        self.actor.train()
        print("Train Count %d, Test done in %.2f sec, reward %.3f, steps %d" % (self.train_count,
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.frame_idx)
        self.writer.add_scalar("test_steps", steps, self.frame_idx)
        # best_reward starts at -inf, so a plain comparison suffices
        if rewards > self.best_reward:
            print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards

        checkpoints = {
            "actor_model": self.actor.state_dict(),
        }
        common.save_best_model(rewards, checkpoints, self.save_path, 'sac_ae_best')

        


if __name__ == "__main__":
    torch.set_default_dtype(torch.float32)
    np.set_printoptions(precision=8)

    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default="mountaincar", help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    # First pass: only the base flags are known; config-derived options stay
    # in `remaining` and are registered below once the YAML has been read.
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load defaults from the YAML config next to this script, then overlay
    # the sections selected via --configs (in order).
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/mountaincar_configs_sac_ae.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Every config key becomes a CLI option so it can be overridden per run.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    # Re-parse into the existing namespace: argparse only applies defaults
    # for attributes not already set, so values consumed by the first pass
    # (--cuda, --name, --configs) are preserved instead of being reset.
    args = parser.parse_args(remaining, namespace=args)
    params = vars(args)

    trainer = Trainer(params, device)
    trainer.load_trainer()
    trainer.train()
