#!/usr/bin/env python3
'''
Adaptation of DrQ-v2 to this project's training framework.

Reference: https://github.com/lutery/drqv2.git

Training notes: (none yet)
'''
import gymnasium as gym
import ptan
import numpy as np

import torch
import torch.multiprocessing as mp

import time
import ptan
import os
import time
import hydra

from lib_drq2 import model, common
from lib_drq2.logger import Logger
from lib_drq2.video import VideoRecorder, TrainVideoRecorder


class DrqExperienceEpisodeeplayBuffer(ptan.experience.ExperienceEpisodeeplayBuffer):
    """Episode replay buffer producing DrQ-v2 style n-step transitions.

    Each stored transition tuple is indexed as (obs, action, reward, done,
    next_obs) — indices 0, 1, 2, 3, 4 (index 3 is unused here).  ``sample``
    draws random episode chunks and folds ``nstep`` consecutive rewards into
    a single discounted return, pairing the first observation with the
    observation reached after the n-th step, as the DrQ-v2 n-step TD target
    requires.
    """

    def __init__(self, discount, nstep, train_video_recorder, experience_source, epsilon_size, exisode_length=300, d_type=torch.float32, device='cpu'):
        """
        discount: per-step discount factor (gamma) for the n-step return.
        nstep: default number of consecutive transitions folded per sample.
        train_video_recorder: recorder instance (recording currently disabled).
        Remaining parameters are forwarded to the ptan episode buffer.
        """
        super().__init__(experience_source, epsilon_size, exisode_length, d_type, device)
        self._discount = discount
        self._nstep = nstep
        self._train_video_recorder = train_video_recorder

    def _sample(self, ep_idx, start_idx, nstep=None):
        """Build one n-step transition from episode ``ep_idx`` at ``start_idx``.

        Returns (obs, action, n_step_reward, accumulated_discount, next_obs),
        where accumulated_discount is gamma**nstep (the factor to apply to the
        bootstrap value).
        """
        if nstep is None:
            # backward-compatible default: previously _sample always used
            # self._nstep even when sample() was called with a different nstep
            nstep = self._nstep
        episode = self.buffer[ep_idx]
        obs = episode[start_idx][0][0]
        action = episode[start_idx][0][1]
        reward = np.zeros_like(episode[start_idx][0][2])
        discount = np.ones_like(reward)
        for i in range(nstep):
            # accumulate the discounted rewards over the n steps
            reward += discount * episode[start_idx + i][0][2]
            discount *= self._discount
        # BUG FIX: next_obs must be the observation reached AFTER the n-th
        # step.  The previous code reused the first transition's next_obs,
        # which made the n-step TD target wrong whenever nstep > 1.
        next_obs = episode[start_idx + nstep - 1][0][4]
        return (obs, action, reward, discount, next_obs)

    def sample(self, batch_size, nstep):
        """Sample ``batch_size`` n-step transitions.

        Episodes are drawn uniformly with replacement; within each chosen
        episode a random start index is picked that leaves room for ``nstep``
        consecutive transitions (assumes every stored episode is longer than
        ``nstep`` — shorter episodes would make randint raise).
        """
        sampled_indices = np.random.choice(len(self.buffer), batch_size, replace=True)
        chunked_episodes = list()
        for ep_idx in sampled_indices:
            start_idx = np.random.randint(low=0, high=len(self.buffer[ep_idx]) - nstep)
            chunked_episodes.append(self._sample(ep_idx, start_idx, nstep))
        return chunked_episodes

    def _add(self, sample):
        # NOTE(review): per-episode train-video recording hooks were removed
        # here as dead commented-out code; the recorder is still held in
        # self._train_video_recorder should recording be re-enabled.
        super()._add(sample)


class Trainer:
    """Drives the DrQ-v2 training loop.

    Responsibilities: build train/eval environments, instantiate the agent
    via hydra, feed an episode replay buffer from a ptan experience source,
    run the train/eval loop, and save/restore checkpoints.
    """

    def __init__(self, params, device):
        """
        params: hydra config node (task_name, agent, replay/eval settings, ...).
        device: torch device string the agent and buffers should use.
        """
        # Working directory doubles as the workspace for logs, videos and
        # checkpoints (hydra typically chdirs into a per-run directory).
        self.work_dir = os.getcwd()
        print(f'workspace: {self.work_dir}')
        self.params = params
        self.device = device
        self.frame_idx = 0       # live counter of environment steps taken
        self.best_reward = None  # best mean evaluation reward seen so far
        self.metrics = None      # last training metrics (None until first update)

        # metrics logger (tensorboard optional)
        self.logger = Logger(self.work_dir, use_tb=self.params.use_tb)
        # records evaluation rollouts (disabled when save_video is False)
        self.video_recorder = VideoRecorder(
            self.work_dir if self.params.save_video else None)
        # records training rollouts (disabled when save_train_video is False)
        self.train_video_recorder = TrainVideoRecorder(
            self.work_dir if self.params.save_train_video else None)

        self.save_path = os.path.join("saves", "drq", self.params.task_name)
        os.makedirs(self.save_path, exist_ok=True)

        self.build_env()
        self.build_model()
        self.build_buffer()

        self.timer = common.Timer()
        self._global_step = 0     # checkpointed mirror of frame_idx (synced in save/load)
        self._global_episode = 0  # number of completed episodes

    @property
    def global_step(self):
        # frame_idx is the live counter; _global_step only mirrors it in checkpoints
        return self.frame_idx

    @property
    def global_episode(self):
        return self._global_episode

    @property
    def global_frame(self):
        """Total simulated frames (action_repeat factor currently disabled, i.e. 1)."""
        return self.global_step * 1  # self.params.action_repeat

    def build_env(self):
        """Create the (domain-randomized) training env and a plain eval env,
        and publish their shapes into the agent config for hydra instantiation."""
        self.env = common.wrap_dqn(gym.make(self.params.task_name, render_mode="rgb_array", domain_randomize=True, continuous=True))
        self.test_env = common.wrap_dqn(gym.make(self.params.task_name, render_mode="rgb_array", continuous=True))
        self.params.agent.action_shape = self.env.action_space.shape
        self.params.agent.obs_shape = self.env.observation_space.shape

    def build_model(self):
        """Instantiate the agent (e.g. drqv2.DrQV2Agent) from the hydra config."""
        self.agent = hydra.utils.instantiate(self.params.agent)

    def build_buffer(self):
        """Wire the acting agent, experience source, replay buffer and the
        step-based predicates that gate training, seeding and evaluation."""
        self.env_agent = common.DrqAgent(self.params, self.agent, self.env, self.device)
        self.env_agent.step_count = self.frame_idx
        self.experience = ptan.experience.ExperienceSourceRAW(self.env, self.env_agent, steps_count=1)
        self.replay_buffer = DrqExperienceEpisodeeplayBuffer(self.params.discount, self.params.nstep, self.train_video_recorder, self.experience, epsilon_size=self.params.replay_episode_size)
        # Predicates gating the loop:
        # train_until_step — True while below the total training frame budget
        self.train_until_step = common.Until(self.params.num_train_frames,
                                       self.params.action_repeat)
        # seed_until_step — True during the initial pure-exploration phase;
        # network updates only start once this turns False
        self.seed_until_step = common.Until(self.params.num_seed_frames,
                                      self.params.action_repeat)
        # eval_every_step — periodic trigger for model evaluation
        self.eval_every_step = common.Every(self.params.eval_every_frames,
                                      self.params.action_repeat)

    def load_model(self):
        """Restore the newest 'epoch' checkpoint from save_path, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # checkpoint names are assumed to embed the step as the third
            # '_'-separated token (e.g. drq_epoch_<step>.pt) — sort by it
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[2].split('.')[0]))

            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                for k, v in checkpoint.items():  # restore saved members onto self
                    self.__dict__[k] = v
                # BUG FIX: propagate the restored step into the live counter
                # and the acting agent's exploration schedule — previously a
                # resumed run always restarted from step 0.
                self.frame_idx = self._global_step
                self.env_agent.step_count = self.frame_idx
                print("加载模型成功")

    def save_model(self):
        """Persist agent, timer and progress counters as a rolling checkpoint."""
        # BUG FIX: _global_step was never updated during training (only
        # frame_idx is incremented), so checkpoints recorded a stale step;
        # mirror the live counter before saving.
        self._global_step = self.frame_idx
        keys_to_save = ['agent', 'timer', '_global_step', '_global_episode']
        checkpoint = {k: self.__dict__[k] for k in keys_to_save}

        common.save_checkpoints(self.frame_idx, checkpoint, self.save_path, "drq", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")

    def __inner_train_model(self):
        """One gradient-update attempt; no-op while still in the seed phase."""
        if not self.seed_until_step(self.global_step):
            train_batch = self.replay_buffer.sample(self.params.batch_size, self.params.nstep)
            metrics = self.agent.update(train_batch, self.global_step)
            self.logger.log_metrics(metrics, self.global_frame, ty='train')

    def train_model(self):
        """Main loop: collect one step per iteration, log completed episodes,
        periodically evaluate, and update the agent after the seed phase."""
        with common.RewardTracker(self.logger, stop_reward=99999) as tracker:
            while self.train_until_step(self.global_step):
                self.replay_buffer.populate(1)
                rewards_steps = self.experience.pop_rewards_steps()
                if rewards_steps:
                    # pop_rewards_steps is non-empty only when an episode finished
                    self._global_episode += 1
                    rewards, steps = zip(*rewards_steps)
                    self.logger.log("train/episode_steps", np.mean(steps), self.global_step)
                    tracker.reward(np.mean(rewards), self.global_step)
                    if self.metrics is not None:
                        # elapsed_time — time since the previous timer reset
                        # total_time — wall time since training started
                        for episode_reward, episode_step in rewards_steps:
                            elapsed_time, total_time = self.timer.reset()
                            # frames per episode: each action repeats over
                            # action_repeat rendered frames
                            episode_frame = episode_step * self.params.action_repeat
                            with self.logger.log_and_dump_ctx(self.global_frame,
                                                            ty='train') as log:
                                log('fps', episode_frame / elapsed_time)
                                log('total_time', total_time)
                                log('episode_reward', episode_reward)
                                log('episode_length', episode_frame)
                                log('episode', self.global_episode)
                                log('step', self.global_step)
                    self.save_model()

                if self.eval_every_step(self.global_step):
                    self.logger.log('eval_total_time', self.timer.total_time(),
                                self.global_frame)
                    self.test_model()

                self.__inner_train_model()
                self.frame_idx += 1

    def test_model(self):
        """Evaluate the agent on the eval env and save it when it beats the best."""
        ts = time.time()
        self.agent.train(False)  # switch to eval mode for deterministic acting
        rewards, steps = Trainer.eval_model(self.agent, self.test_env, self.video_recorder, self.global_step, count=10, device=self.device)
        self.agent.train(True)
        print("Test done in %.2f sec, reward %.3f, steps %d" % (
            time.time() - ts, rewards, steps))
        self.logger.log("eval/test_reward", rewards, self.frame_idx)
        self.logger.log("eval/test_steps", steps, self.frame_idx)
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards
            common.save_best_model(rewards, self.agent, self.save_path, 'drq')

        print(f"save best model, current test score: {rewards}, mean_step: {steps}")

    # BUG FIX: decorator order — @staticmethod must be outermost; the
    # original had @torch.no_grad() wrapping the staticmethod object, which
    # breaks instance-attribute access (and fails entirely on Python < 3.10).
    @staticmethod
    @torch.no_grad()
    def eval_model(net, env, video_recorder, global_step, count=10, device="cpu"):
        """Run ``count`` evaluation episodes and return (mean_reward, mean_steps).

        NOTE(review): video_recorder is currently unused (recording hooks
        were disabled); kept in the signature for interface compatibility.
        """
        rewards = 0.0
        steps = 0
        for episode in range(count):
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
                action = net.act(obs_v, global_step, eval_mode=True)
                obs, reward, done, trunc, _ = env.step(action[0])
                done = done or trunc
                rewards += reward
                steps += 1
                if done:
                    break
        return rewards / count, steps / count


@hydra.main(config_path="./config_drq2", config_name="config")
def main(cfg):
    """Hydra entry point: resolve the compute device, then build, restore
    and run the trainer."""
    cfg.device = common.select_device_str(gpu=cfg.gpu)

    trainer = Trainer(params=cfg, device=cfg.device)
    trainer.load_model()
    trainer.train_model()



if __name__ == "__main__":
    # Use the 'spawn' start method for worker processes — presumably chosen
    # because fork is unsafe with CUDA contexts; confirm against how
    # torch.multiprocessing is used by the libs here.
    mp.set_start_method('spawn')
    main()
