#!/usr/bin/env python3
'''
DrQ training script (adaptation complete).

Reference: https://github.com/lutery/drq.git

Training notes:
'''
import gymnasium as gym
import ptan
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp

import time
import yaml
import pathlib
import sys
import ptan
import os
import time
from copy import deepcopy
from tensorboardX import SummaryWriter
import hydra

from lib_drq import model, common
from lib_drq.logger import Logger

import ale_py
from lib_drq.video import VideoRecorder

gym.register_envs(ale_py)


class Trainer:
    """Training harness for a DrQ-style agent.

    Wires together the gymnasium environments, the hydra-instantiated agent,
    a ptan experience source / replay buffer, periodic evaluation, video
    recording, and checkpoint save/restore.
    """

    def __init__(self, params, device):
        """
        :param params: hydra/omegaconf config carrying env, agent and logging settings
        :param device: torch device the agent runs on
        """
        # Use the launch directory as the workspace root.
        self.work_dir = os.getcwd()
        print(f'workspace: {self.work_dir}')
        self.params = params
        self.device = device
        self.frame_idx = 0        # number of completed agent update steps
        self.best_reward = None   # best mean evaluation reward seen so far

        # Metrics logger (console + optional tensorboard).
        self.logger = Logger(self.work_dir,
                             save_tb=self.params.log_save_tb,
                             log_frequency=self.params.log_frequency_step,
                             agent=self.params.train_name,
                             action_repeat=self.params.action_repeat)

        self.save_path = os.path.join("saves", "drq", self.params.train_name)
        os.makedirs(self.save_path, exist_ok=True)

        self.build_env()
        self.build_model()
        self.build_buffer()

        # Passing None as the output directory disables recording.
        self.video_recorder = VideoRecorder(
            self.work_dir if self.params.save_video else None)

    def _make_env(self):
        """Build one wrapped environment instance from the config."""
        return common.wrap_dqn(
            gym.make(self.params.env, render_mode="rgb_array"),
            stack_frames=self.params.frame_stack,
            action_repeat=self.params.action_repeat,
            obs_size=self.params.image_size)

    def build_env(self):
        """Create the train/eval environments and propagate observation and
        action metadata into the agent config."""
        self.env = self._make_env()
        self.test_env = self._make_env()
        self.params.agent.obs_shape = self.env.observation_space.shape
        self.params.agent.action_shape = self.env.action_space.shape
        self.params.agent.action_range = [
            float(self.env.action_space.low.min()),
            float(self.env.action_space.high.max())
        ]

        # The sub-component configs need the same shapes as well.
        self.params.encoder_config.obs_shape = self.env.observation_space.shape
        self.params.critic_config.action_shape = self.env.action_space.shape
        self.params.actor_config.action_shape = self.env.action_space.shape

    def build_model(self):
        """Instantiate the agent from its hydra config."""
        self.agent = hydra.utils.instantiate(self.params.agent)

    def build_buffer(self):
        """Create the exploration agent, experience source and replay buffer."""
        self.env_agent = common.DrqAgent(self.params, self.agent, self.env, self.device)
        self.env_agent.step_count = self.frame_idx
        self.experience = ptan.experience.ExperienceSourceRAW(self.env, self.env_agent, steps_count=1)
        self.replay_buffer = ptan.experience.ExperienceReplayBuffer(self.experience, buffer_size=self.params.replay_buffer_capacity)

    def load_model(self):
        """Resume training state from the newest epoch checkpoint, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Checkpoint filenames are assumed to look like
            # "<name>_epoch_<n>.<ext>" (see save_checkpoints) — sort by <n>
            # so the most recent checkpoint comes last.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[2].split('.')[0]))

            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                self.agent.load_state(checkpoint['agent'])
                self.frame_idx = checkpoint['frame_idx']
                self.params = checkpoint['params']
                # build_buffer() ran before the load, so re-sync the
                # exploration agent's step counter with the restored index.
                self.env_agent.step_count = self.frame_idx
                print("加载模型成功")

    def save_model(self):
        """Write a rolling epoch checkpoint (agent weights + training state)."""
        checkpoint = {
            "frame_idx": self.frame_idx,
            "params": self.params,
            "agent": self.agent.state_dict(),
        }

        common.save_checkpoints(self.frame_idx, checkpoint, self.save_path, "drq", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")

    def train_model(self):
        """Main training loop: collect experience, update the agent, and
        periodically evaluate/checkpoint. Runs until externally stopped."""
        with common.RewardTracker(self.logger, stop_reward=99999) as tracker:
            while True:
                # Step the environment once and store the transition.
                self.replay_buffer.populate(1)
                rewards_steps = self.experience.pop_rewards_steps()
                if rewards_steps:
                    rewards, steps = zip(*rewards_steps)
                    self.logger.log("train/episode_steps", np.mean(steps), self.frame_idx)
                    tracker.reward(np.mean(rewards), self.frame_idx)

                # Wait until enough seed experience has been collected.
                if len(self.replay_buffer) < self.params.num_seed_steps:
                    continue

                self.agent.update(self.replay_buffer, self.logger, self.frame_idx)
                self.frame_idx += 1
                if self.frame_idx % 10 == 0:  # evaluate every 10 update steps
                    self.test_model()
                    self.save_model()

    def test_model(self):
        """Run an evaluation pass, log metrics, and save the best model."""
        ts = time.time()
        self.agent.train(False)  # eval mode for deterministic action selection
        rewards, steps = Trainer.eval_model(self.agent.act, self.test_env, self.video_recorder, count=10, device=self.device)
        self.agent.train(True)
        print("Test done in %.2f sec, reward %.3f, steps %d" % (
            time.time() - ts, rewards, steps))
        self.logger.log("eval/test_reward", rewards, self.frame_idx)
        self.logger.log("eval/test_steps", steps, self.frame_idx)
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards
            common.save_best_model(rewards, self.agent.state_dict(), self.save_path, 'drq')
            # Only announce when a new best model was actually stored
            # (previously this printed unconditionally, which was misleading).
            print(f"save best model, current test score: {rewards}, mean_step: {steps}")

    # NOTE: @staticmethod must be the outermost decorator — decorators apply
    # bottom-up, and torch.no_grad() must wrap the plain function, not the
    # staticmethod descriptor (the previous order was inverted).
    @staticmethod
    @torch.no_grad()
    def eval_model(net, env, video_recorder, count=10, device="cpu"):
        """Play `count` episodes greedily and return (mean_reward, mean_steps).

        Only the first episode is recorded to video.

        :param net: callable mapping a batched observation tensor to actions
        :param env: evaluation environment
        :param video_recorder: VideoRecorder used for the first episode
        :param count: number of evaluation episodes
        :param device: torch device for the observation tensors
        """
        rewards = 0.0
        steps = 0
        for episode in range(count):
            video_recorder.init(enabled=(episode == 0))
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
                action = net(obs_v, sample=False)  # sample=False -> deterministic action
                obs, reward, done, trunc, _ = env.step(action[0])
                video_recorder.record(env)
                done = done or trunc  # treat truncation as episode end
                rewards += reward
                steps += 1
                if done:
                    break
            video_recorder.save(f"{time.time()}.mp4")
        return rewards / count, steps / count


@hydra.main(config_path="./config_drq", config_name="config")
def main(cfg):
    """Entry point: pick the compute device, then build, restore and run the trainer."""
    run_device = common.select_device(gpu=cfg.gpu)
    # Components that read the device from the config need its string form.
    cfg.device = common.select_device_str(gpu=cfg.gpu)

    drq_trainer = Trainer(params=cfg, device=run_device)
    drq_trainer.load_model()
    drq_trainer.train_model()



if __name__ == "__main__":
    # Set the multiprocessing start method before any workers are created;
    # 'spawn' is the start method torch.multiprocessing commonly requires —
    # NOTE(review): presumably chosen for CUDA compatibility, confirm.
    mp.set_start_method('spawn')
    main()
