#!/usr/bin/env python3
'''
Adaptation complete, pending training.

References:
1.  https://github.com/lutery/R2D2.git

Training log:
'''
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp

import time
import yaml
import pathlib
import sys
import copy
import os
import pickle
from copy import deepcopy
from tensorboardX import SummaryWriter
import threading

from lib import model, common

import ale_py

gym.register_envs(ale_py)


class Trainer:
    """R2D2 learner/coordinator.

    Owns the shared, online, and target Q-networks, spawns actor processes
    that collect experience into a shared replay buffer, and runs the
    learner loop (double Q-learning with value rescaling and prioritized
    replay).
    """

    def __init__(self, params, device):
        """
        :param params: hyper-parameter dict (config.yaml merged with CLI flags)
        :param device: torch device the learner trains on
        """
        self.params = params
        self.device = device
        self.num_actors = params['num_actors']
        self.lr = self.params['lr']
        self.eps = self.params['eps']
        self.grad_norm = self.params['grad_norm']  # gradient-clipping threshold
        self.num_updates = 0
        self.target_net_update_interval = self.params['target_net_update_interval']  # target-net sync period
        self.save_interval = self.params['save_interval']  # checkpoint period
        self.batched_data = []  # small prefetch cache of training batches (filled by prepare_data thread)

        self.save_path = os.path.join("saves", "r2d2-kungfumaster")
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-" + "kungfulmaster-r2d2")

        self.build_env()
        self.build_model()
        self.build_buffer()

    def build_env(self):
        """Create the training and evaluation environments and cache their spaces."""
        # BUG FIX: previously read the module-level global ``params``; use the
        # instance's own copy so Trainer does not depend on script globals.
        self.env = common.wrap_dqn(self.params['env_name'])
        self.test_env = common.wrap_dqn(self.params['env_name'])
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.n

    def build_model(self):
        """Build the shared/online/target networks, optimizer and loss."""
        # The shared model lives in shared memory so actor processes can read
        # the latest weights (published via store_weights).
        self.model = model.R2D2Network(self.action_shape, self.obs_shape, self.params).to(device=self.device)
        self.model.share_memory()
        self.shared_model = self.model

        self.online_net = ptan.agent.TargetNet(self.model)  # trainable copy in .target_model
        self.online_net.target_model.to(self.device)
        self.online_net.target_model.train()
        self.target_net = ptan.agent.TargetNet(self.online_net.target_model)  # frozen target copy
        self.target_net.target_model.eval()
        # Only the online network's parameters are optimized.
        self.optimizer = torch.optim.Adam(self.online_net.target_model.parameters(), lr=self.lr, eps=self.eps)
        self.loss_fn = nn.MSELoss(reduction='none')

    def build_buffer(self):
        """Create IPC queues, the replay buffer, and the actor/buffer processes."""
        self.sample_queue_list = [mp.Queue() for _ in range(self.num_actors)]
        self.batch_queue = mp.Queue(8)
        self.priority_queue = mp.Queue(8)

        self.buffer = common.ReplayBuffer(self.sample_queue_list, self.batch_queue, self.priority_queue, self.params)
        # One actor per process; each gets its own exploration epsilon derived
        # from its actor id (Ape-X style epsilon schedule).
        self.actors = [model.Actor(common.get_epsilon(
            actor_id=i, base_eps=self.params['base_eps'], alpha=self.params['alpha'], num_actors=self.num_actors),
            self.model, self.sample_queue_list[i], self.params) for i in range(self.num_actors)]

        # Each actor runs in its own process; Actor.run is the entry point and
        # can access the actor's members.
        actor_procs = [mp.Process(target=actor.run) for actor in self.actors]
        for proc in actor_procs:
            proc.start()

        # Background process that moves collected samples from the actors'
        # queues into the replay buffer.
        buffer_proc = mp.Process(target=self.buffer.run)
        buffer_proc.start()

    def load_model(self):
        """Resume training state from the newest 'epoch' checkpoint, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            # Guard: the directory may contain only best-model files without
            # "epoch" in the name; nothing to resume from in that case.
            if not checkpoints:
                return
            # BUG FIX: map_location previously used the module-level global
            # ``device``; use the trainer's own device.
            checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]),
                                    map_location=self.device, weights_only=False)
            self.num_updates = checkpoint["num_updates"]
            # Online, shared and target nets all restart from the same weights.
            self.online_net.target_model.load_state_dict(checkpoint["online_net"])
            self.model.load_state_dict(checkpoint["online_net"])
            self.target_net.target_model.load_state_dict(checkpoint["online_net"])
            self.optimizer.load_state_dict(checkpoint["optimizer"])
            self.buffer.add_frame_idx = checkpoint["add_frame_idx"]

            print("加载模型成功")
            # Print the (possibly scheduler-modified) learning rate.
            for param_group in self.optimizer.param_groups:
                print("学习率：", param_group['lr'])

    def prepare_data(self):
        """Daemon-thread loop: keep up to 4 batches prefetched from batch_queue."""
        while True:
            if not self.batch_queue.empty() and len(self.batched_data) < 4:
                data = self.batch_queue.get_nowait()
                self.batched_data.append(data)
            else:
                time.sleep(0.1)

    def save_model(self):
        """Write a checkpoint with everything needed to resume training."""
        checkpoint = {
            "num_updates": self.num_updates,
            "online_net": self.online_net.target_model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "add_frame_idx": self.buffer.add_frame_idx,
        }

        common.save_checkpoints(self.num_updates, checkpoint, self.save_path, "r2d2", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")

    @staticmethod
    def value_rescale(value, eps=1e-3):
        """R2D2's invertible value rescaling h(x) = sign(x)(sqrt(|x|+1)-1) + eps*x."""
        return value.sign()*((value.abs()+1).sqrt()-1) + eps*value

    @staticmethod
    def inverse_value_rescale(value, eps=1e-3):
        """Inverse of :meth:`value_rescale` (closed form of h^-1)."""
        temp = ((1 + 4*eps*(value.abs()+1+eps)).sqrt() - 1) / (2*eps)
        return value.sign() * (temp.square() - 1)

    def __train(self):
        """Main learner loop: sample batches, compute double-Q targets with
        value rescaling, update the online net, and push back new priorities."""
        # Background prefetch thread keeps self.batched_data topped up.
        background_thread = threading.Thread(target=self.prepare_data, daemon=True)
        background_thread.start()
        time.sleep(2)

        while self.num_updates < self.params['training_steps']:

            while not self.batched_data:
                time.sleep(1)
            data = self.batched_data.pop(0)

            batch_obs, batch_last_action, batch_last_reward, batch_hidden, batch_action, batch_n_step_reward, batch_n_step_gamma, burn_in_steps, learning_steps, forward_steps, idxes, is_weights, old_ptr, env_steps = data
            batch_obs, batch_last_action, batch_last_reward = batch_obs.to(self.device), batch_last_action.to(self.device), batch_last_reward.to(self.device)
            batch_hidden, batch_action = batch_hidden.to(self.device), batch_action.to(self.device)
            batch_n_step_reward, batch_n_step_gamma = batch_n_step_reward.to(self.device), batch_n_step_gamma.to(self.device)
            is_weights = is_weights.to(self.device)

            batch_obs, batch_last_action = batch_obs.float(), batch_last_action.float()
            batch_action = batch_action.long()

            # Split the stacked hidden tensor into a 2-tuple — presumably the
            # (h, c) pair the recurrent network expects. TODO confirm against
            # R2D2Network's hidden-state layout.
            batch_hidden = (batch_hidden[:1], batch_hidden[1:])

            # Normalize raw uint8 frames to [0, 1].
            batch_obs = batch_obs / 255

            # Double Q-learning:
            with torch.no_grad():
                # the online net selects the greedy action ...
                batch_action_ = self.online_net.target_model.calculate_q_(batch_obs, batch_last_action, batch_last_reward, batch_hidden, burn_in_steps, learning_steps, forward_steps).argmax(1).unsqueeze(1)
                # ... and the target net evaluates it.
                batch_q_ = self.target_net.target_model.calculate_q_(batch_obs, batch_last_action, batch_last_reward, batch_hidden, burn_in_steps, learning_steps, forward_steps).gather(1, batch_action_).squeeze(1)

            # batch_q_ is in rescaled space: invert the rescale, form the
            # n-step Bellman target, then rescale again for training stability.
            target_q = self.value_rescale(batch_n_step_reward + batch_n_step_gamma * self.inverse_value_rescale(batch_q_))

            # Current Q-values from the online net; calculate_q (no
            # forward_steps argument) evaluates the learning steps only.
            batch_q = self.online_net.target_model.calculate_q(batch_obs, batch_last_action, batch_last_reward, batch_hidden, burn_in_steps, learning_steps).gather(1, batch_action).squeeze(1)

            # Importance-sampling weights correct the sampling bias introduced
            # by prioritized replay.
            loss = (is_weights * self.loss_fn(batch_q, target_q)).mean()

            # Absolute TD errors feed the updated priorities.
            td_errors = (target_q-batch_q).detach().clone().squeeze().abs().cpu().float().numpy()

            # Mixed TD error (weighted combination of per-sequence max and
            # mean) used as the new sample priority.
            priorities = common.calculate_mixed_td_errors(td_errors, learning_steps.numpy())

            self.optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(self.online_net.target_model.parameters(), self.grad_norm)
            self.optimizer.step()

            self.num_updates += 1

            # Send updated priorities back to the buffer process; idxes
            # identify the buffer slots the batch came from.
            self.priority_queue.put((idxes, priorities, old_ptr, loss.item()))

            # Publish fresh weights to shared memory so actors collect with a
            # recent policy.
            if self.num_updates % 4 == 0:
                self.store_weights()

            # Periodically sync the frozen target network.
            if self.num_updates % self.target_net_update_interval == 0:
                self.target_net.sync()

            # Periodic checkpoint.
            if self.num_updates % self.save_interval == 0:
                self.save_model()

            # Periodic evaluation with the online net switched to eval mode.
            if self.num_updates % self.params['eval_interval'] == 0:
                self.online_net.target_model.eval()
                self.eval_model(self.online_net.target_model)
                self.online_net.target_model.train()

    def store_weights(self):
        """Copy the online net's weights into the shared model for the actors."""
        self.shared_model.load_state_dict(self.online_net.target_model.state_dict())

    def train_model(self):
        """Public entry point for the learner loop."""
        self.__train()

    @torch.no_grad()
    def eval_model(self, model):
        """Run 10 greedy evaluation episodes with ``model`` and log results.

        Episodes are aborted after more than 30 consecutive Noop actions so an
        idling agent cannot stall evaluation forever.
        """
        total_reward = 0.0
        total_steps = 0
        for _ in range(10):
            noop_action_count = 0
            pre_action = -1
            obs = self.test_env.reset()[0]
            agent_state = common.AgentState(
                obs=torch.from_numpy(obs).unsqueeze(0),
                action_dim=self.action_shape
            )
            while True:
                # BUG FIX: evaluate with the model passed in (the online net in
                # eval mode), not the shared self.model, which may be stale and
                # is still in train mode.
                q_value, hidden = model(agent_state)
                action = q_value.argmax(dim=1).item()
                agent_state.hidden_state = hidden
                agent_state.q_value = q_value
                if action == 0 and pre_action == action:  # Noop
                    noop_action_count += 1
                    if noop_action_count > 30:
                        break
                else:
                    noop_action_count = 0
                pre_action = action
                next_obs, reward, done, trunc, _ = self.test_env.step(action)
                # NOTE(review): ``obs`` is never advanced to ``next_obs`` in
                # this loop; presumably AgentState.update tracks the current
                # observation internally — confirm against common.AgentState.
                agent_state.update(obs=obs, action=action, reward=reward, done=done, next_obs=next_obs)
                total_reward += reward
                total_steps += 1
                if done or trunc:
                    break
        mean_reward, mean_step = total_reward / 10, total_steps / 10
        common.save_best_model(mean_reward, self.online_net.target_model.state_dict(), self.save_path, "r2d2-best", keep_best=10)
        # BUG FIX: add_scalar previously omitted global_step, so every eval
        # point was logged at the same x position.
        self.writer.add_scalar("test_reward", mean_reward, self.num_updates)
        self.writer.add_scalar("mean_step", mean_step, self.num_updates)
        print(f"save best model, current test score: {mean_reward}, mean_step: {mean_step}")



if __name__ == "__main__":
    # 'spawn' start method so model tensors can be shared safely with the
    # actor processes (required for CUDA tensors).
    mp.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    parser.add_argument("-n", "--name", default='pendulum', help="Name of the run")
    # First pass: consume --cuda/--configs/--name; everything else stays in
    # ``remaining`` for the second pass below.
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

     # parser.add_argument('--configs', nargs='+', required=True)
    # Comment the line above and comment out the line below if you want to debug in IDE like PyCharm
    # Update from configs.yaml
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/config.yaml').read_text())
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Update from cli: every config key becomes a flag whose type is inferred
    # from its default value in the YAML config.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    # NOTE(review): this second parse only sees the leftover argv, so
    # user-supplied --cuda/--name values are not reflected in ``params``
    # (``device`` was already derived above) — confirm this is intended.
    args = parser.parse_args(remaining)
    params = vars(args)
    # Total recurrent sequence length handled per replay sample.
    params['seq_len'] = params['burn_in_steps'] + params['learning_steps'] + params['forward_steps']
    params['device'] = device

    trainer = Trainer(params=params, device=device)
    trainer.load_model()
    trainer.train_model()
