#!/usr/bin/env python3
'''
完成适配，待验证
参考链接：
1. https://github.com/lutery/curl-dqn.git
2. curl算是仅仅只是一种增强环境特征采集的方法，可以结合各种具备Target进行特征采集增强


训练记录：
20250509: 测试分数达到了30+分，继续训练
20250512： 继续训练，测试分数251.849分，训练分数42.651分，继续训练
20250513: 测试分数251.8497，训练分数27.287，训练时间不够？继续训练一天
20250514: 测试分数251.8497，训练分数54.880，继续训练
20250519：正在训练
20250521：因为训练中断，没有训练分数，测试分数251.849，没有进步，感觉是中断继续训练有问题，类似kconv，后面排查
暂停训练，因为感觉中断继续训练存在问题，需要排查
20250603: 在腾讯云上重新训练，未调整超参数
20250606: 在腾讯云上训练，测试分数595.02，训练分数337.5，继续训练，是可以训练的，不过可能调整超参数并且看下为什么无法继续训练
20250608: 在腾讯云上训练，测试分数601，训练分数435.215，停止训练，是可以训练的，可能是由于超参数或者缺少学习率调度器导致分数难以继续向上，先暂停训练，看看如何调整
'''
import os
import sys
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
import yaml
import pathlib

from lib import model, common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

# Register the ALE (Atari) environment ids with Gymnasium so gym.make can resolve them.
gym.register_envs(ale_py)


class Trainer:
    """SAC trainer augmented with CURL contrastive representation learning.

    The critic's convolutional encoder is shared with the actor (conv weights
    are tied) and additionally trained with a CPC-style contrastive loss
    (CURL) between two differently random-cropped views of each observation,
    which strengthens the features extracted from pixels.
    """

    def __init__(self, params, device):
        """
        params: dict of hyper-parameters (merged from the YAML config / CLI).
        device: torch.device used for all networks, batches and checkpoints.
        """
        self.params = params
        # --- replay / network / optimizer hyper-parameters ---
        self.replay_buffer_capacity = params['replay_buffer_capacity']
        self.hidden_dim = params['hidden_dim']
        self.encoder_feature_dim = params['encoder_feature_dim']
        self.actor_log_std_max = params['actor_log_std_max']
        self.num_layers = params['num_layers']
        self.num_filters = params['num_filters']
        self.encoder_type = params['encoder_type']
        self.init_temperature = params['init_temperature']
        self.actor_lr = params['actor_lr']
        self.actor_beta = params['actor_beta']
        self.critic_lr = params['critic_lr']
        self.critic_beta = params['critic_beta']
        self.alpha_lr = params['alpha_lr']
        self.alpha_beta = params['alpha_beta']
        self.curl_latent_dim = params['curl_latent_dim']
        self.encoder_lr = params['encoder_lr']
        self.replay_initial = params['replay_initial']
        self.batch_size = params['batch_size']
        self.eval_freq = params['eval_freq']
        self.save_freq = params['save_freq']
        self.image_size = params['image_size']
        self.detach_encoder = params['detach_encoder']
        self.discount = params['discount']
        self.actor_update_freq = params['actor_update_freq']
        self.critic_target_update_freq = params['critic_target_update_freq']
        self.cpc_update_freq = params['cpc_update_freq']
        self.log_interval = params['log_interval']
        self.critic_tau = params['critic_tau']
        self.encoder_tau = params['encoder_tau']
        self.gamma = params['gamma']

        self.device = device

        # Checkpoint directories for model weights and (optionally) the replay buffer.
        self.save_path = os.path.join("saves", "sac-curl-" + self.params['name'])
        os.makedirs(self.save_path, exist_ok=True)
        self.save_path_buffer = os.path.join("saves", "sac-curl-" + self.params['name'] + "-buffer")
        os.makedirs(self.save_path_buffer, exist_ok=True)

        self.frame_idx = 0    # environment steps taken so far
        self.train_count = 0  # gradient-update iterations performed so far
        self.best_reward = float('-inf')

        self.build_env()
        self.build_model()
        self.build_buffer()


    def build_buffer(self):
        """Create the TensorBoard writer, the sampling agents and the replay buffer."""
        self.writer = SummaryWriter(comment="-sac_curl_" + self.params['name'])
        # Random agent used to seed the buffer; policy-driven agent used afterwards.
        self.env_sample_agent = common.EnvSampleAgent(env=self.test_env, device=self.device)
        self.net_sample_agent = common.ContinuousRandomAgent(net=lambda obs: self.sample_action(obs), device=self.device)
        # Start with the random agent while frame_idx is below replay_initial;
        # train() swaps in the policy agent once seeding has finished.
        self.exp_source = ptan.experience.ExperienceSourceFirstLast(self.env, self.env_sample_agent if self.frame_idx < self.replay_initial else self.net_sample_agent, gamma=self.gamma, steps_count=1)
        self.buffer = ptan.experience.ExperienceReplayBuffer(self.exp_source, buffer_size=self.replay_buffer_capacity)


    def sample_action(self, obs):
        """Sample a stochastic action `pi` from the actor for the given observation.

        Observations whose spatial size differs from image_size are
        center-cropped first, matching the preprocessing used at training time.
        """
        if obs.shape[-1] != self.image_size:
            obs = common.center_crop_image(obs, self.image_size)

        with torch.no_grad():
            mu, pi, _, _ = self.actor(obs, compute_log_pi=False)
            return pi


    def build_env(self):
        """Create the training and evaluation CarRacing-v3 environments."""
        # todo dreamerv1 works without frame stacking — worth trying here as well
        self.env = common.wrap_dqn(gym.make('CarRacing-v3', render_mode="rgb_array", lap_complete_percent=0.95, domain_randomize=True, continuous=True))
        self.test_env = common.wrap_dqn(gym.make('CarRacing-v3', render_mode="rgb_array", lap_complete_percent=0.95, domain_randomize=True, continuous=True))
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.shape


    def build_model(self):
        """Build actor, critic, target critic, CURL head, alpha and all optimizers."""
        self.actor = model.Actor(
            obs_shape=self.obs_shape,
            action_shape=self.action_shape,
            hidden_dim=self.hidden_dim,
            encoder_type=self.encoder_type,
            encoder_feature_dim=self.encoder_feature_dim,
            log_std_min=self.params['actor_log_std_min'],
            log_std_max=self.actor_log_std_max,
            num_layers=self.num_layers,
            num_filters=self.num_filters
        ).to(self.device)

        self.critic = model.Critic(
            obs_shape=self.obs_shape,
            action_shape=self.action_shape,
            hidden_dim=self.hidden_dim,
            encoder_type=self.encoder_type,
            encoder_feature_dim=self.encoder_feature_dim,
            num_layers=self.num_layers,
            num_filters=self.num_filters
        ).to(self.device)

        self.critic_target = ptan.agent.TargetNet(self.critic)
        # Tie the actor's conv weights to the critic's encoder (standard CURL setup).
        self.actor.encoder.copy_conv_weights_from(self.critic.encoder)

        # Learnable SAC temperature, optimized in log-space so alpha stays positive.
        # BUGFIX: previously used the module-level global `device` instead of self.device.
        self.log_alpha = torch.tensor(np.log(self.init_temperature), dtype=torch.float32).to(self.device)
        self.log_alpha.requires_grad = True
        # Standard SAC heuristic: target entropy = -|A| (negative action dimensionality).
        self.target_entropy = -np.prod(self.action_shape)

        # optimizers
        self.actor_optimizer = torch.optim.Adam(
            self.actor.parameters(), lr=self.actor_lr, betas=(self.actor_beta, 0.999)
        )

        self.critic_optimizer = torch.optim.Adam(
            self.critic.parameters(), lr=self.critic_lr, betas=(self.critic_beta, 0.999)
        )

        self.log_alpha_optimizer = torch.optim.Adam(
            [self.log_alpha], lr=self.alpha_lr, betas=(self.alpha_beta, 0.999)
        )

        if self.encoder_type == 'pixel':
            # Create the CURL head on top of the critic / target-critic encoders
            # (the 128 latent batch size is probably unnecessary).
            self.CURL = model.CURL(self.obs_shape, self.encoder_feature_dim,
                        self.curl_latent_dim, self.critic,self.critic_target, output_type='continuous').to(self.device)

            # Separate optimizer for the critic's encoder, driven by the contrastive loss.
            self.encoder_optimizer = torch.optim.Adam(
                self.critic.encoder.parameters(), lr=self.encoder_lr
            )

            # Optimizer for the CURL head itself (bilinear similarity matrix W).
            self.cpc_optimizer = torch.optim.Adam(
                self.CURL.parameters(), lr=self.encoder_lr
            )
        else:
            raise ValueError("Encoder type not supported")

        self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
        self.switch_train()
        self.critic_target.target_model.train()


    def switch_train(self, training=True):
        """Put actor, critic and CURL into train (or eval) mode together."""
        self.actor.train(training)
        self.critic.train(training)
        self.CURL.train(training)


    def load_trainer(self):
        """Resume networks, optimizers and counters from the newest checkpoint, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Pick the checkpoint with the highest epoch number encoded in its filename.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                # BUGFIX: map_location previously referenced the global `device`.
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                self.actor.load_state_dict(checkpoint['actor_model'])
                self.critic.load_state_dict(checkpoint['critic_model'])
                self.critic_target.target_model.load_state_dict(checkpoint['critic_target_model'])
                self.CURL.load_state_dict(checkpoint['CURL'])
                self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
                self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer'])
                self.log_alpha_optimizer.load_state_dict(checkpoint['log_alpha_optimizer'])
                self.encoder_optimizer.load_state_dict(checkpoint['encoder_optimizer'])
                self.cpc_optimizer.load_state_dict(checkpoint['cpc_optimizer'])
                self.log_alpha = checkpoint['log_alpha']
                self.target_entropy = checkpoint['target_entropy']

                self.frame_idx = checkpoint['frame_idx']
                self.train_count = checkpoint['train_count']

            # Buffer persistence is disabled for now — try without storing it first.
            # checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path_buffer)),
            #                     key=lambda x: int(x.split('_')[-1].split('.')[0]))
            # if len(checkpoints) > 0:
            #     buffer = torch.load(os.path.join(self.save_path_buffer, checkpoints[-1]))
            #     print("加载buffer成功")

            print("加载模型成功")


    def save_trainer(self):
        """Write all networks, optimizers and counters to a training checkpoint."""
        checkpoints = {
            "actor_model": self.actor.state_dict(),
            "critic_model": self.critic.state_dict(),
            "CURL": self.CURL.state_dict(),
            "critic_target_model": self.critic_target.target_model.state_dict(),
            "actor_optimizer": self.actor_optimizer.state_dict(),
            "critic_optimizer": self.critic_optimizer.state_dict(),
            "log_alpha_optimizer": self.log_alpha_optimizer.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "cpc_optimizer": self.cpc_optimizer.state_dict(),
            "log_alpha": self.log_alpha,
            "target_entropy": self.target_entropy,
            "frame_idx": self.frame_idx,
            "train_count": self.train_count
        }

        common.save_checkpoints(self.train_count, checkpoints, self.save_path, 'sac_curl')
        # Buffer persistence is disabled for now — try without storing it first.
        # common.save_checkpoints(self.train_count, self.buffer, self.save_path_buffer, 'sac_curl_buffer')


    def collect_seed_episodes(self, tb_tracker, tracker):
        """Fill the replay buffer with random transitions until replay_initial is reached."""
        while len(self.buffer) < self.replay_initial:
            self.frame_idx += 1
            self.buffer.populate(1)
            rewards_steps = self.exp_source.pop_rewards_steps()
            if rewards_steps:
                # Log episode length and reward whenever an episode finished.
                rewards, steps = zip(*rewards_steps)
                tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                tracker.reward(rewards[0], self.frame_idx)


    def train(self):
        """Main loop: seed the buffer, then alternate env steps with SAC+CURL updates forever."""
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
                self.collect_seed_episodes(tb_tracker, tracker)
                # Seeding done: switch the experience source to the policy agent.
                self.exp_source.agent = self.net_sample_agent

                while True:
                    self.frame_idx += 1
                    self.buffer.populate(1)
                    rewards_steps = self.exp_source.pop_rewards_steps()
                    if rewards_steps:
                        # Log episode length and reward whenever an episode finished.
                        rewards, steps = zip(*rewards_steps)
                        tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                        tracker.reward(rewards[0], self.frame_idx)

                    self.__train_trainer()

                    if self.train_count % self.eval_freq == 0:
                       self.__test_trainer()

                    if self.train_count % self.save_freq == 0:
                        self.save_trainer()

    @property
    def alpha(self):
        """Current SAC entropy temperature (always positive via exp of log_alpha)."""
        return self.log_alpha.exp()


    def __train_critic(self, next_obs, obs, action, reward, not_done):
        """One gradient step on both Q-heads against the soft Bellman target."""
        with torch.no_grad():
            _, policy_action, log_pi, _ = self.actor(next_obs)
            # Target critic evaluates Q for next state and the policy's next action.
            target_Q1, target_Q2 = self.critic_target(next_obs, policy_action)
            # Clipped double-Q: take the smaller estimate, minus the entropy bonus.
            target_V = torch.min(target_Q1,
                                 target_Q2) - self.alpha.detach() * log_pi
            # Soft Bellman backup for the current transition.
            target_Q = reward + (not_done * self.discount * target_V)

        # Current Q estimates. detach_encoder is False here, so critic gradients
        # also flow into the shared encoder.
        current_Q1, current_Q2 = self.critic(
            obs, action, detach_encoder=self.detach_encoder)
        # Both heads regress toward the same Bellman target.
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)

        # Optimize the critic.
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()


    def __update_actor_and_alpha(self, obs):
        """One gradient step on the actor and the entropy temperature alpha.

        obs: batch of (cropped) observations on self.device.
        """
        # Detach the encoder so the actor loss does not update the shared conv weights.
        _, pi, log_pi, log_std = self.actor(obs, detach_encoder=True)
        actor_Q1, actor_Q2 = self.critic(obs, pi, detach_encoder=True)

        # Clipped double-Q value of the policy's action.
        actor_Q = torch.min(actor_Q1, actor_Q2)
        # SAC actor loss: maximize Q while keeping the entropy bonus
        # (alpha.detach() * log_pi) — minimizing this picks high-value,
        # high-entropy actions.
        actor_loss = (self.alpha.detach() * log_pi - actor_Q).mean()

        if self.frame_idx % self.log_interval == 0:
            # Log the actor loss and the (constant) target entropy.
            self.writer.add_scalar('train_actor_loss', actor_loss, self.frame_idx)
            self.writer.add_scalar('train_actor_target_entropy', self.target_entropy, self.frame_idx)
        # Differential entropy of the diagonal Gaussian policy:
        # 0.5 * D * (1 + ln(2*pi)) + sum(log_std).
        entropy = 0.5 * log_std.shape[1] * \
            (1.0 + np.log(2 * np.pi)) + log_std.sum(dim=-1)
        if self.frame_idx % self.log_interval == 0:
            self.writer.add_scalar('train_actor/entropy', entropy.mean(), self.frame_idx)

        # Optimize the actor.
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        self.log_alpha_optimizer.zero_grad()
        # Temperature loss: pushes alpha up when policy entropy (-log_pi) is
        # below target_entropy, and down when it is above.
        alpha_loss = (self.alpha *
                    (-log_pi - self.target_entropy).detach()).mean()
        if self.frame_idx % self.log_interval == 0:
            self.writer.add_scalar('train_alpha_loss', alpha_loss, self.frame_idx)
            self.writer.add_scalar('train_alpha_value', self.alpha, self.frame_idx)
        alpha_loss.backward()
        self.log_alpha_optimizer.step()


    def __train_cpc(self, obs_anchor, obs_pos):
        """One contrastive (CURL/CPC) step on the anchor/positive crop pair."""
        # Encode the anchor with the online encoder and the positive with the
        # EMA (target) encoder.
        z_a = self.CURL.encode(obs_anchor)
        z_pos = self.CURL.encode(obs_pos, ema=True)

        logits = self.CURL.compute_logits(z_a, z_pos)
        # logits is a (B, B) similarity matrix whose diagonal holds the
        # positive pairs, e.g.:
        #   [[ 10.0,  -5.0,  -5.0,  -5.0],
        #    [ -5.0,  10.0,  -5.0,  -5.0],
        #    [ -5.0,  -5.0,  10.0,  -5.0],
        #    [ -5.0,  -5.0,  -5.0,  10.0]]
        # Cross-entropy with labels 0..B-1 (an implicit one-hot on the
        # diagonal) pushes diagonal similarities up and off-diagonal ones
        # down, so the two crops of the same frame agree — which sharpens the
        # critic encoder's representation.
        labels = torch.arange(logits.shape[0]).long().to(self.device)
        loss = self.cross_entropy_loss(logits, labels)

        self.encoder_optimizer.zero_grad()
        self.cpc_optimizer.zero_grad()
        loss.backward()

        self.encoder_optimizer.step()
        self.cpc_optimizer.step()
        if self.frame_idx % self.log_interval == 0:
            self.writer.add_scalar('train_curl_loss', loss, self.frame_idx)


    def __train_trainer(self):
        """One full SAC+CURL update: critic, (periodic) actor/alpha, target sync, CPC."""
        batch = self.buffer.sample(self.batch_size)
        # BUGFIX: previously passed the module-level global `device`.
        states_v, obs_anchor_v, obs_pos_v, actions_v, rewards_v, dones_mask, not_done_mask, last_states_v = common.unpack_batch_sac_curl(batch, self.device)

        self.__train_critic(last_states_v, states_v, actions_v, rewards_v, not_done_mask)

        if self.frame_idx % self.actor_update_freq == 0:
            # Update the actor only every actor_update_freq frames.
            self.__update_actor_and_alpha(states_v)

        if self.frame_idx % self.critic_target_update_freq == 0:
            # Polyak-average online critic params into the target critic.
            common.soft_update_params(
                self.critic.Q1, self.critic_target.target_model.Q1, self.critic_tau
            )
            common.soft_update_params(
                self.critic.Q2, self.critic_target.target_model.Q2, self.critic_tau
            )
            common.soft_update_params(
                self.critic.encoder, self.critic_target.target_model.encoder,
                self.encoder_tau
            )

        # obs_anchor_v / obs_pos_v are two independent random crops of the same
        # observations, used as the positive pair for the contrastive loss.
        # NOTE(review): cpc_update_freq is configured but unused — the CPC loss
        # currently runs every update step; confirm whether that is intended.
        self.__train_cpc(obs_anchor_v, obs_pos_v)
        self.train_count += 1


    @torch.no_grad()
    def __test_net(self, count, device):
        """Play `count` full episodes with the deterministic actor.

        count: number of episodes to play (each one runs until termination).
        device: torch.device used for the observation tensors.

        return: (mean reward, mean steps) over the episodes.
        """
        rewards = 0.0
        steps = 0
        for _ in range(count):
            obs, _ = self.test_env.reset()
            while True:
                obs = torch.tensor(obs, dtype=torch.float32, device=device).unsqueeze(0)
                obs = common.center_crop_image(obs, self.image_size)
                # Deterministic action: use the mean (mu), skip sampling/log-prob.
                action, _, _, _ = self.actor(obs, compute_pi=False, compute_log_pi=False)

                obs, reward, done, truncated, _ = self.test_env.step(action[0].cpu().numpy())
                rewards += reward
                steps += 1
                if done or truncated:
                    break
        return rewards / count, steps / count


    @torch.no_grad()
    def __test_trainer(self):
        """Evaluate the current policy, log results and save a best-model checkpoint."""
        ts = time.time()
        self.actor.eval()
        # BUGFIX: previously passed the module-level global `device`.
        rewards, steps = self.__test_net(count=10, device=self.device)
        self.actor.train()
        print("Test done in %.2f sec, reward %.3f, steps %d" % (
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.frame_idx)
        self.writer.add_scalar("test_steps", steps, self.frame_idx)
        # best_reward starts at -inf and is never None, so a plain comparison
        # suffices (the original dead `is None` branches were removed).
        if rewards > self.best_reward:
            print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards

        checkpoints = {
            "actor_model": self.actor.state_dict(),
        }
        common.save_best_model(rewards, checkpoints, self.save_path, 'sac_curl_best')
        


if __name__ == "__main__":
    torch.set_default_dtype(torch.float32)
    np.set_printoptions(precision=8)
    np_float32 = np.float32

    parser = argparse.ArgumentParser()
    # NOTE(review): default=True combined with action='store_true' means the
    # flag is always True and CUDA can never be disabled from the CLI — confirm intent.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default="carracing", help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    # Parse only the known args first; config-derived flags are registered below.
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # parser.add_argument('--configs', nargs='+', required=True)
    # Comment the line above and comment out the line below if you want to debug in IDE like PyCharm
    # Update from configs.yaml
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/carracing_configs_sac_curl.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    # Merge the selected config sections; later sections override earlier ones.
    for name in args.configs:
        default_params.update(configs[name])
    # Update from cli: expose every config key as a flag so it can be overridden.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)


    trainer = Trainer(params, device)
    # Resume from the newest checkpoint in the save directory, if one exists.
    trainer.load_trainer()
    trainer.train()
