#!/usr/bin/env python3
'''
正在适配
但是需要考虑重构，以灵活适配不同维度长度的动作

参考链接： https://github.com/lutery/self-supervised-rl.git 中 main_embedding_simple_move_td3.py

训练记录：
'''
from email import policy
import gym  # 使用gym而不是gymnasium
import gym_hybrid  # 导入gym_hybrid以注册Sliding-v0环境
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp

import time
import yaml
import pathlib
import sys
import copy
import os
import pickle
from copy import deepcopy
from tensorboardX import SummaryWriter
import threading

from lib import model, common

import ale_py
from gym.wrappers.normalize import RunningMeanStd

# gym.register_envs(ale_py)  # 不需要这行，ale_py会自动注册


class Trainer:
    """HyAR-TD3 trainer for the hybrid-action `Sliding-v0` environment.

    A TD3 policy acts in a learned latent (embedding) action space, while a
    VAE-style action-representation model (HyAR) maps between that latent
    space and the environment's hybrid discrete + continuous actions.
    Training alternates TD3 policy updates with periodic re-training of the
    action representation to counter catastrophic forgetting.
    """

    def __init__(self, params, device):
        """Set up hyper-parameters, logging, environment, models and buffer.

        params: parsed command-line arguments (see common.build_parser).
        device: torch device all models/tensors are placed on.
        """
        self.params = params
        self.device = device
        self.tau = self.params.tau
        self.discount = self.params.discount
        self.max_steps = 30  # maximum number of steps per episode
        self.total_reward = 0.  # total reward accumulated over all episodes
        self.returns = []  # per-episode returns
        self.train_step = 0
        self.success = []  # whether each episode succeeded / terminated
        self.recon_s_loss = []  # observation-reconstruction losses
        self.learning_rate_qactor = 0.001
        self.learning_rate_continuous_actor = 0.0001
        self.vae_batch_size = 64
        self.embed_lr = 1e-4
        self.internal = 100  # frame interval between VAE refresh trainings


        self.frame_idx = 0
        self.train_vae_count = 0
        # NOTE(review): test_model() also handles `best_reward is None`; with
        # this 0 initialisation a "best" model is only saved once the test
        # reward becomes positive — confirm this is intended.
        self.best_reward = 0
        self.c_rate, self.recon_s = 0, 0

        self.save_path = os.path.join("saves", "hyar_sliding_td3_", params.name)
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-" + "hyar_sliding_td3_" + params.name)
        self.logger = common.setup_logger(self.save_path)

        self.build_env()
        self.build_model()
        self.build_buffer()


    def build_env(self):
        """Create train/test environments and derive state/action dimensions."""
        self.env = common.wrap_dqn('Sliding-v0')
        self.test_env = common.wrap_dqn('Sliding-v0', episodic_life=False)
        self.obs_shape = self.env.observation_space.shape
        self.state_dim = self.obs_shape[0]
        self.discrete_action_dim = self.env.action_space[0].n  # size of the discrete action set
        self.continuous_action_dim = self.env.action_space[1].shape[0]  # size of the continuous action vector
        # Latent embedding sizes are twice the raw action dimensions.
        self.discrete_emb_dim = self.discrete_action_dim * 2
        self.continuous_emb_dim = self.continuous_action_dim * 2
        # TODO hard-coded for this environment for now; derive automatically later
        self.max_action = 1.0
        print("state_dim", self.obs_shape)
        print("discrete_action_dim", self.discrete_action_dim)
        print("continuous_action_dim", self.continuous_action_dim)
        # Keyword arguments shared by the TD3 policy constructor.
        self.kwargs = {
            "state_dim": self.state_dim,
            "discrete_emb_dim": self.discrete_emb_dim,
            "continuous_emb_dim": self.continuous_emb_dim,
            "max_action": self.max_action,
            "discount": self.discount,
            "tau": self.tau,
            "device": self.device,
        }


    def build_model(self):
        """Build the TD3 policy, the HyAR action-representation model, and the
        discrete/continuous actor networks plus their target copies."""
        # Target policy smoothing is scaled wrt the action scale
        self.kwargs["policy_noise"] = self.params.policy_noise * self.max_action
        self.kwargs["noise_clip"] = self.params.noise_clip * self.max_action
        self.kwargs["policy_freq"] = self.params.policy_freq
        self.policy_model = model.TD3(**self.kwargs)
        print(self.policy_model)

        # Action-embedding (HyAR) model.
        # NOTE(review): continuous_action_dim is hard-coded to 2 here even
        # though self.continuous_action_dim is available — confirm they agree
        # for environments other than Sliding-v0.
        self.action_rep = model.Action_representation(state_dim=self.state_dim,
                                                                  action_dim=self.discrete_action_dim,
                                                                  continuous_action_dim=2,
                                                                  reduced_action_dim=self.discrete_emb_dim,
                                                                  reduce_continuous_action_dim=self.continuous_emb_dim,
                                                                  device=self.device)
        print(self.action_rep)
        self.action_rep_target = ptan.agent.TargetNet(self.action_rep)

        # Q-value network over the discrete actions.
        actor_kwargs={'hidden_layers': [256, 256, 128, 64],
                      'action_input_layer': 0, }
        self.qactor = model.QActor(self.state_dim, self.discrete_action_dim, self.continuous_action_dim, **actor_kwargs).to(self.device)
        # NOTE(review): `requre_grad` (sic) is passed through to the project's
        # TargetNet wrapper — keep the spelling the wrapper expects.
        self.actor_target = ptan.agent.TargetNet(self.qactor, requre_grad=False)
        self.actor_target.alpha_sync(0.0)

        # Actor network predicting the continuous action parameters.
        actor_continuous_kwargs={'hidden_layers': [256, 256, 128, 64],
                            'squashing_function': False,
                            'output_layer_init_std': 0.0001, }
        # BUGFIX: previously used the module-level global `device` (defined only
        # in the __main__ block); use the instance's device instead.
        self.continuous_actor = model.ContinuousActor(self.state_dim, self.discrete_action_dim, self.continuous_action_dim, **actor_continuous_kwargs).to(self.device)
        self.continuous_actor_target = ptan.agent.TargetNet(self.continuous_actor, requre_grad=False)
        self.continuous_actor_target.alpha_sync(0.0)

        # Original DDPG paper [Lillicrap et al. 2016] used a weight decay of 0.01 for Q (critic)
        # but setting weight_decay=0.01 on the critic_optimiser seems to perform worse...
        # using AMSgrad ("fixed" version of Adam, amsgrad=True) doesn't seem to help either...
        self.actor_optimiser = optim.Adam(self.qactor.parameters(), lr=self.learning_rate_qactor) #, betas=(0.95, 0.999))
        self.actor_param_optimiser = optim.Adam(self.continuous_actor.parameters(), lr=self.learning_rate_continuous_actor) #, betas=(0.95, 0.999)) #, weight_decay=critic_l2_reg)


    def build_buffer(self):
        """Create the agent, the experience source and the replay buffer."""
        self.agent = common.HyArTd3Agent(self.params, 
                                         self.qactor, 
                                         self.continuous_actor, 
                                         self.policy_model, 
                                         self.action_rep,
                                         self.discrete_action_dim, 
                                         self.continuous_action_dim, 
                                         self.discrete_emb_dim,
                                         self.continuous_emb_dim,
                                         self.max_action, 
                                         self.env, 
                                         device=self.device)
        self.exp_source = ptan.experience.ExperienceSourceRAW(self.env, self.agent, steps_count=1)
        self.replay_buffer = common.HyArTd3ReplayBuffer(self.exp_source, int(1e6), device=self.device)

    
    def load_model(self):
        """Resume training from the newest `epoch` checkpoint, if any exists.

        Restores models, target networks, optimisers, the replay buffer and
        the training counters. Does nothing when no checkpoint is found.
        """
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Checkpoints are named ..._<frame_idx>.<ext>; sort numerically so
            # the last entry is the most recent one.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))

            if len(checkpoints) > 0:
                # weights_only=False unpickles arbitrary objects (replay buffer,
                # optimiser state) — only load checkpoints from a trusted path.
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                self.c_rate = checkpoint["c_rate"]
                self.recon_s = checkpoint["recon_s"]
                self.frame_idx = checkpoint["frame_idx"] 
                self.best_reward = checkpoint["best_reward"]
                self.train_vae_count = checkpoint["train_vae_count"]
                self.action_rep.load_state_dict(checkpoint["action_rep"])
                self.action_rep_target.target_model.load_state_dict(checkpoint["action_rep_target"])
                self.qactor.load_state_dict(checkpoint["qactor"])
                self.actor_target.target_model.load_state_dict(checkpoint["qactor_target"])
                self.continuous_actor.load_state_dict(checkpoint["continuous_actor"])
                self.continuous_actor_target.target_model.load_state_dict(checkpoint["continuous_actor_target"])
                self.actor_optimiser.load_state_dict(checkpoint["actor_optimiser"])
                self.actor_param_optimiser.load_state_dict(checkpoint["actor_param_optimiser"])
                self.replay_buffer.load_state_dict(checkpoint["replay_buffer"])
                self.policy_model.load_state_dict(checkpoint["policy_model"])
                self.agent.c_rate = self.c_rate
                self.agent.compute_emb = checkpoint["compute_emb"]  # action embeddings are always computed during training
                self.agent.cur_step = checkpoint["cur_step"]
                print(f"加载模型成功, frame_idx: {self.frame_idx}, best_reward: {self.best_reward}, train_vae_count: {self.train_vae_count}")
            else:
                print("没有找到模型，重新训练")


    def save_model(self):
        """Write a full training checkpoint (models, optimisers, buffer, counters)."""
        checkpoint = {
            "c_rate": self.c_rate,
            "recon_s": self.recon_s,
            "frame_idx": self.frame_idx, 
            "best_reward": self.best_reward,
            "action_rep": self.action_rep.state_dict(),
            "train_vae_count": self.train_vae_count,
            "action_rep_target": self.action_rep_target.target_model.state_dict(),
            "qactor": self.qactor.state_dict(),
            "qactor_target": self.actor_target.target_model.state_dict(),
            "continuous_actor": self.continuous_actor.state_dict(),
            "continuous_actor_target": self.continuous_actor_target.target_model.state_dict(),
            "actor_optimiser": self.actor_optimiser.state_dict(),
            "actor_param_optimiser": self.actor_param_optimiser.state_dict(),
            "replay_buffer": self.replay_buffer.state_dict(),
            "policy_model": self.policy_model.state_dict(),
            "compute_emb": self.agent.compute_emb,  # action embeddings are always computed during training
            "cur_step": self.agent.cur_step,
        }

        # NOTE(review): checkpoint prefix "bootstrapped_dqn" looks copied from
        # another script — confirm before renaming (old checkpoints would stop
        # being found by load_model).
        common.save_checkpoints(self.frame_idx, checkpoint, self.save_path, "bootstrapped_dqn", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")


    def __train_vae(self, tracker):
        """Initial VAE phase: fill the buffer with random play, then train the
        action-representation model until convergence."""
        # Collect up to 5000 environment steps before the first VAE training.
        for _ in range(5000):
            self.replay_buffer.populate(1)  # one environment step
            self.frame_idx += 1
            new_end_infos = self.exp_source.pop_rewards_steps()
            if new_end_infos:
                if tracker.reward(new_end_infos, self.frame_idx):
                    break

        self.c_rate, self.recon_s = self.__inner_vae_train(action_rep=self.action_rep, train_step=5000, replay_buffer=self.replay_buffer,
                                batch_size=self.vae_batch_size,
                                save_dir=self.save_path, vae_save_model=True, embed_lr=self.embed_lr, tracker=tracker)
        tracker.writer.add_scalar("recon_s", self.recon_s, self.frame_idx)
        self.agent.c_rate = self.c_rate
        print("discrete embedding", self.action_rep.discrete_embedding())
        print("c_rate", self.c_rate)
        print("recon_s", self.recon_s)


    def __inner_vae_train(self, action_rep, train_step, replay_buffer, batch_size, save_dir, vae_save_model, embed_lr, tracker=None):
        '''
        Train the action-representation (VAE) model on replay samples until
        the loss plateaus, then estimate the latent-space statistics.

        action_rep: action representation model to train
        train_step: minimum number of training iterations before the
                    convergence check may stop the loop
        replay_buffer: replay buffer to sample transitions from
        batch_size: training batch size
        save_dir: output directory (currently unused here)
        vae_save_model: whether to save the VAE model (currently unused here)
        embed_lr: learning rate for the embedding model
        tracker: optional RewardTracker whose writer receives loss scalars

        Returns (c_rate, recon_s): the discrete latent-space boundary rates
        and the observation-reconstruction error.
        '''
        initial_losses = []  # mean loss of every iteration, drives convergence check
        for counter in range(int(train_step) + 10):
            losses = []  # kept as a list so np.mean below stays trivially correct
            state, discrete_action, parameter_action, _, _, _, _, state_next_state, _, _ = replay_buffer.sample(
                batch_size)
            # One VAE update; the returned values (total loss, observation
            # reconstruction, continuous-action reconstruction, KL term) are
            # scalars used only for logging.
            vae_loss, recon_loss_s, recon_loss_c, KL_loss = action_rep.unsupervised_loss(state,
                                                                                        discrete_action.reshape(1,
                                                                                                                -1).squeeze().long(),
                                                                                        parameter_action,
                                                                                        state_next_state,
                                                                                        batch_size, embed_lr)
            losses.append(vae_loss)
            initial_losses.append(np.mean(losses))

            if counter % 100 == 0 and counter >= 100:
                # Every 100 iterations, report the latest losses and the mean
                # over the last 50 iterations.
                print("vae_loss, recon_loss_s, recon_loss_c, KL_loss", vae_loss, recon_loss_s, recon_loss_c, KL_loss)
                print("Epoch {} loss:: {}".format(counter, np.mean(initial_losses[-50:])))
                if tracker is not None:
                    tracker.writer.add_scalar("vae_loss", np.mean(initial_losses[-50:]), self.train_vae_count * (int(train_step) + 10) + counter)
                    tracker.writer.add_scalar("recon_loss_s", recon_loss_s, self.train_vae_count * (int(train_step) + 10) + counter)
                    tracker.writer.add_scalar("recon_loss_c", recon_loss_c, self.train_vae_count * (int(train_step) + 10) + counter)
                    tracker.writer.add_scalar("KL_loss", KL_loss, self.train_vae_count * (int(train_step) + 10) + counter)

            # Terminate initial phase once action representations have converged:
            # require at least train_step iterations, then stop when the mean of
            # the last 5 losses (plus a 1e-5 tolerance for numerical noise) is no
            # longer below the mean of the last 10 — i.e. the loss has plateaued.
            if len(initial_losses) >= train_step and np.mean(initial_losses[-5:]) + 1e-5 >= np.mean(initial_losses[-10:]):
                break
        
        # Draw one large batch to estimate latent-space statistics:
        # c_rate — boundary range of the discrete-action latent space,
        # recon_s — observation-difference reconstruction error.
        state_, discrete_action_, parameter_action_, _, _, _, _, state_next_state_, _, _ = replay_buffer.sample(
            batch_size=5000)
        c_rate, recon_s = action_rep.get_c_rate(state_, discrete_action_.reshape(1, -1).squeeze().long(), parameter_action_,
                                                state_next_state_, batch_size=5000, range_rate=2)
        
        self.train_vae_count += 1
        return c_rate, recon_s

    
    def train_model(self):
        """Main training loop: initial VAE phase, then interleaved environment
        stepping, TD3 updates, periodic evaluation and VAE refreshes."""
        with common.RewardTracker(self.writer, stop_reward=99999) as tracker:
            while True:  # loop over total training frames
                # Run the initial representation training exactly once.
                if self.train_vae_count == 0:
                    self.__train_vae(tracker=tracker)

                # ------- TD3 training ------
                print("TD3 train")
                self.agent.compute_emb = True  # action embeddings are always computed during training
                self.agent.cur_step = 0

                self.replay_buffer.populate(1)
                self.frame_idx += 1
                new_end_infos = self.exp_source.pop_rewards_steps()
                if new_end_infos:
                    if tracker.reward(new_end_infos, self.frame_idx):
                        break
                

                # Only start policy training after the warm-up period.
                if self.frame_idx >= self.params.start_timesteps:
                    self.policy_model.train(self.replay_buffer, self.action_rep, self.c_rate, self.recon_s,
                                                                                self.params.batch_size)
                    self.save_model()
                    

                if self.frame_idx % self.params.eval_freq == 0:
                    self.test_model()


                # Periodically re-train the VAE so the action representation
                # keeps up with the changing data distribution
                # (guards against catastrophic forgetting).
                if self.frame_idx % self.internal == 0 and self.frame_idx >= 1000:
                    self.c_rate, self.recon_s = self.__inner_vae_train(action_rep=self.action_rep, train_step=5000, replay_buffer=self.replay_buffer,
                                        batch_size=self.vae_batch_size,
                                        save_dir=self.save_path, vae_save_model=True, embed_lr=self.embed_lr, tracker=tracker)
                    tracker.writer.add_scalar("recon_s", self.recon_s, self.frame_idx)
                    self.agent.c_rate = self.c_rate


    def test_model(self):
        """Evaluate the current policy on the test environment and save the
        best-scoring model."""
        ts = time.time()
        self.policy_model.eval()
        self.action_rep.eval()
        rewards, steps = Trainer.eval_model(self.policy_model, self.action_rep, self.test_env, self.c_rate, count=10, device=self.device)
        self.policy_model.train()
        self.action_rep.train()
        self.logger.info("Test done in %.2f sec, reward %.3f, steps %d" % (
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.frame_idx)
        self.writer.add_scalar("test_steps", steps, self.frame_idx)
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                self.logger.info("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards
            state_dict = {
                "policy_model": self.policy_model.test_state_dict(),
                "action_rep": self.action_rep.state_dict(),
            }
            common.save_best_model(rewards, state_dict, self.save_path, 'bootstrapped_dqn_best')

        self.logger.info(f"save best model, current test score: {rewards}, mean_step: {steps}")


    # BUGFIX: decorator order — @staticmethod must be outermost so that
    # torch.no_grad() wraps the raw function. The previous order handed the
    # staticmethod object to no_grad, which fails on Python < 3.10 (staticmethod
    # objects are not callable there) and stripped the staticmethod wrapper.
    @staticmethod
    @torch.no_grad()
    def eval_model(policy_model, action_rep, env, c_rate, count=10, device="cpu"):
        """Play `count` episodes greedily and return (mean_reward, mean_steps).

        policy_model: TD3 policy producing latent discrete/continuous embeddings
        action_rep: action-representation model decoding embeddings to actions
        env: environment to evaluate in
        c_rate: discrete latent-space boundary rates used to rescale embeddings
        count: number of evaluation episodes
        device: unused here; kept for interface compatibility
        """
        rewards = 0.0
        steps = 0
        for _ in range(count):
            obs, _ = env.reset()
            while True:
                discrete_emb, continuous_emb = policy_model.select_action(obs)
                # Rescale the continuous embedding into the latent range
                # observed during VAE training (see common.true_parameter_action).
                true_continuous_emb = common.true_parameter_action(continuous_emb, c_rate)
                # Select the discrete action: find the nearest discrete action to
                # the predicted embedding, then look up that action's embedding.
                discrete_action_embedding = copy.deepcopy(discrete_emb)
                discrete_action_embedding = torch.from_numpy(discrete_action_embedding).float().reshape(1, -1)
                discrete_action = action_rep.select_discrete_action(discrete_action_embedding)
                discrete_emb_1 = action_rep.get_embedding(discrete_action).cpu().view(-1).data.numpy()
                # Decode the continuous action from the state, the rescaled
                # continuous embedding and the discrete action's embedding.
                all_continuous_action = action_rep.select_parameter_action(obs, true_continuous_emb,
                                                                        discrete_emb_1)
                continuous_action = all_continuous_action
                # Combine into the hybrid action tuple the environment expects.
                action = (discrete_action, continuous_action)
                obs, reward, done, trunc, _ = env.step(action)
                # env.render()
                done = done or trunc
                rewards += reward
                steps += 1
                if done:
                    break
        return rewards / count, steps / count



if __name__ == "__main__":
    parser = common.build_parser()
    args = parser.parse_args()
    device = common.select_device(args=args)

    trainer = Trainer(params=args, device=device)
    trainer.load_model()
    trainer.train_model()

