#!/usr/bin/env python3
'''
Adaptation complete.
The only difference is that during training, when gradients are accumulated,
the shared feature-extraction network accumulates its gradients once per head,
so those gradients must be divided by n_ensemble; the action-prediction heads
must NOT be divided by n_ensemble, hence the adjustment.
Reference: dqn_fruit.py in https://github.com/lutery/bootsrapped-dqn.git

Training log:
'''
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp

import time
import yaml
import pathlib
import sys
import copy
import os
import pickle
from copy import deepcopy
from tensorboardX import SummaryWriter
import threading

from lib import model, common

import ale_py
from gym.wrappers.normalize import RunningMeanStd

gym.register_envs(ale_py)


class Trainer:
    """Bootstrapped DQN trainer: an ensemble of Q-heads over a shared feature core.

    Because a single backward pass accumulates the shared feature extractor's
    gradients once per head, those gradients are rescaled by 1/n_ensemble before
    the optimizer step, while each head's own layers are left untouched.
    Reference: dqn_fruit.py in https://github.com/lutery/bootsrapped-dqn.git
    """

    def __init__(self, params, device):
        self.params = params
        self.device = device
        self.memory_size = params.memory_size
        self.update_freq = params.update_freq  # how often (in env steps) a training update runs
        self.learn_start = params.learn_start  # steps of pure data collection before training begins
        self.history_size = params.history_size

        self.batch_size = params.batch_size
        self.ep = params.ep  # initial epsilon of the epsilon-greedy policy
        self.eps_end = params.eps_end  # final (minimum) epsilon
        self.eps_endt = params.eps_endt  # number of steps over which epsilon decays to eps_end
        self.eps_start = self.learn_start

        self.lr = params.lr
        self.discount = params.discount

        self.agent_type = params.agent_type
        self.max_steps = params.max_steps  # maximum number of training steps
        self.eval_freq = params.eval_freq
        self.save_freq = params.save_freq
        self.eval_steps = params.eval_steps
        self.target_update = params.target_update
        self.max_eval_iter = params.max_eval_iter
        self.n_ensemble = params.n_ensemble  # number of bootstrapped action-prediction heads

        # Support resuming training from a non-zero starting step: shift the
        # learn/eval thresholds by the configured offset.
        self.start_steps = self.params.start_steps
        self.learn_start = self.learn_start + self.start_steps
        self.eval_steps = self.eval_steps + self.start_steps

        self.frame_idx = 0
        self.best_reward = 0

        self.save_path = os.path.join("saves", "bootstrapped_dqn2_", params.name)
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-" + "bootstrapped_dqn2_" + params.name)
        self.logger = common.setup_logger(self.save_path)

        self.build_env()
        self.build_model()
        self.build_buffer()


    def build_env(self):
        """Create the training and evaluation environments and cache their shapes."""
        self.env = common.wrap_dqn('ALE/MsPacman-v5', stack_frames=self.history_size)
        # Evaluation env keeps full episodes (no episodic-life wrapper).
        self.test_env = common.wrap_dqn('ALE/MsPacman-v5', stack_frames=self.history_size, episodic_life=False)
        self.obs_shape = self.env.observation_space.shape
        self.class_num = self.action_shape = self.env.action_space.n



    def build_model(self):
        """Build the prior/policy ensemble networks, the target net and the optimizer."""
        self.prior_model = model.EnsembleNet(
            n_ensemble=self.n_ensemble,
            n_actions=self.action_shape,
            h=self.obs_shape[1],
            w=self.obs_shape[2],
            num_channels=self.obs_shape[0]).to(device=self.device)
        self.policy_model = model.EnsembleNet(
            n_ensemble=self.n_ensemble,
            n_actions=self.action_shape,
            h=self.obs_shape[1],
            w=self.obs_shape[2],
            num_channels=self.obs_shape[0]).to(device=self.device)
        # Randomized prior: the policy net is combined with a frozen prior net.
        self.policy_model = model.NetWithPrior(self.policy_model, self.prior_model, prior_scale=self.params.prior_scale).to(device=self.device)
        self.target_model = ptan.agent.TargetNet(self.policy_model)
        print(self.policy_model)

        # Only the online net's parameters are optimized (the prior stays fixed).
        self.optimizer = torch.optim.Adam(self.policy_model.net.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=10000, gamma=0.9)


    def build_buffer(self):
        """Create the agent, the experience source and the bootstrapped replay buffer."""
        self.agent = common.BootStrappedAgent(self.params, self.policy_model, self.env, device=self.device)
        self.exp_source = ptan.experience.ExperienceSourceRAW(self.env, self.agent, steps_count=1)
        self.replay_buffer = common.BootStrappedReplayBuffer(self.exp_source, self.params.memory_size, n_ensemble=self.params.n_ensemble, bernoulli_prob=self.params.bernoulli_prob)


    def load_model(self):
        """Resume from the newest 'epoch' checkpoint in save_path, if any exists."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Sort checkpoints by the trailing step number in the filename.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))

            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                self.policy_model.load_state_dict(checkpoint['policy_model'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                self.target_model.model.load_state_dict(checkpoint['target_model'])
                self.scheduler.load_state_dict(checkpoint['scheduler'])
                self.frame_idx = checkpoint['frame_idx']
                self.best_reward = checkpoint['best_reward']
                self.replay_buffer.load_state_dict(checkpoint['replay_buffer'])
                # Keep the agent's step counter (epsilon schedule) in sync.
                self.agent.cur_step = self.frame_idx

                print("加载模型成功")
                print(f"learning rate: {self.scheduler.get_last_lr()[0]}")
                print(f"scheduler step: {self.scheduler.last_epoch}")


    def save_model(self):
        """Snapshot all training state (nets, optimizer, buffer, counters) to disk."""
        checkpoint = {
            "frame_idx": self.frame_idx,
            "best_reward": self.best_reward,
            "policy_model": self.policy_model.state_dict(),
            "target_model": self.target_model.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "replay_buffer": self.replay_buffer.state_dict()
        }

        common.save_checkpoints(self.frame_idx, checkpoint, self.save_path, "bootstrapped_dqn2", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")


    def __train(self):
        """Sample one batch and run a Double-DQN update over all ensemble heads."""
        self.optimizer.zero_grad()
        batch = self.replay_buffer.sample(self.batch_size)

        states_t, actions_t, rewards_t, next_states_t, dones_t, masks_t = common.unpack_batch(batch=batch, device=self.device)

        # Per-head Q-values for the next state, detached so no gradient flows
        # through the bootstrap targets.  unpack_batch already placed the
        # tensors on self.device, so they are passed directly — re-wrapping
        # with torch.Tensor(...) would copy them to a CPU float tensor.
        target_next_state_action_values = [n.detach() for n in self.target_model.model(next_states_t, None)]
        next_state_action_values = [n.detach() for n in self.policy_model(next_states_t, None)]

        # Per-head Q-values for the current state (gradients enabled).
        state_action_values = self.policy_model(states_t, None)

        total_loss = []
        for head_num in range(self.n_ensemble):  # one Double-DQN loss per head
            # How many samples in the batch train this head (bootstrap mask).
            total_used = torch.sum(masks_t[:, head_num])
            if total_used > 0.0:
                policy_next_state_value = next_state_action_values[head_num]
                target_next_state_action_value = target_next_state_action_values[head_num]
                # Double DQN: the policy net selects the argmax action...
                next_state_value = torch.max(policy_next_state_value, dim=1).indices.view(-1, 1)
                # ...and the target net evaluates it.
                next_max_state_value = target_next_state_action_value.gather(1, next_state_value)
                rewards_t = rewards_t.view(-1, 1)
                # Column 0: r + gamma * Q_target  (non-terminal transitions)
                # Column 1: r                     (terminal transitions)
                # dones_t selects the right column per sample via gather.
                target_state_value = torch.stack([rewards_t + (self.discount * next_max_state_value), rewards_t], dim=1).squeeze().gather(1, dones_t)
                # Q(s, a) of the action actually taken.
                state_action_value = state_action_values[head_num].gather(1, actions_t)
                # reduction='none' keeps per-sample losses so masked-out samples
                # can be zeroed below.
                loss = F.smooth_l1_loss(state_action_value, target_state_value, reduction='none')
                # Flatten both sides to 1-D before masking: a (B,) mask times a
                # (B, 1) loss broadcasts to a (B, B) matrix and silently
                # inflates the loss by roughly a factor of B.
                loss = masks_t[:, head_num].view(-1) * loss.view(-1)
                # Average over the samples that actually train this head.
                loss = torch.sum(loss / total_used)
                total_loss.append(loss)

        if len(total_loss) > 0:
            # Sum the head losses and backprop once.  The shared feature
            # extractor ('core_net') accumulated one gradient contribution per
            # head, so its gradients are divided by n_ensemble; each head's own
            # layers received exactly one contribution and stay as-is.
            total_loss = sum(total_loss)
            total_loss.backward()

            for name, param in self.policy_model.named_parameters():
                if param.grad is not None and 'core_net' in name:
                    param.grad.data /= self.n_ensemble

            self.optimizer.step()



    def train_model(self):
        """Main loop: collect experience, train, sync the target net, evaluate."""
        with common.RewardTracker(self.writer, stop_reward=99999) as tracker:
            while True:
                # Collect one environment step into the replay buffer.
                self.replay_buffer.populate(1)
                self.frame_idx += 1
                new_end_infos = self.exp_source.pop_rewards_steps()
                if new_end_infos:
                    if tracker.reward(new_end_infos, self.frame_idx):
                        break

                if self.frame_idx > self.learn_start and self.frame_idx % self.update_freq == 0:
                    self.__train()

                if self.frame_idx > self.learn_start and self.frame_idx % self.save_freq == 0:
                    self.save_model()

                if self.frame_idx > self.learn_start and self.frame_idx % self.target_update == 0:
                    # Hard sync: copy the policy weights into the target net.
                    self.target_model.alpha_sync(0.0)

                if self.frame_idx > self.eval_steps and self.frame_idx % self.eval_freq == 0:
                    self.test_model()


    def test_model(self):
        """Evaluate the current policy and save a checkpoint on a new best reward."""
        ts = time.time()
        self.policy_model.eval()
        rewards, steps = Trainer.eval_model(self.policy_model, self.test_env, count=10, device=self.device)
        self.policy_model.train()
        self.logger.info("Test done in %.2f sec, reward %.3f, steps %d" % (
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.frame_idx)
        self.writer.add_scalar("test_steps", steps, self.frame_idx)
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                self.logger.info("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards
            common.save_best_model(rewards, self.policy_model.state_dict(), self.save_path, 'bootstrapped_dqn2_best')
            # Log only when a new best model was actually saved; the original
            # logged this unconditionally, which was misleading.
            self.logger.info(f"save best model, current test score: {rewards}, mean_step: {steps}")


    @staticmethod
    @torch.no_grad()
    def eval_model(net, env, count=10, device="cpu"):
        """Run `count` greedy episodes; return (mean_reward, mean_steps).

        A stall guard aborts an episode once the same action repeats more than
        100 times in a row.  @staticmethod must be the OUTERMOST decorator:
        with @torch.no_grad() outermost it would wrap the staticmethod object
        itself, which fails on Python < 3.10 and strips the staticmethod
        wrapper otherwise.
        """
        rewards = 0.0
        steps = 0
        for _ in range(count):
            # Reset the stall guard per episode; carrying it across episodes
            # made every episode after a stall abort on its first repeated
            # action.
            same_action_count = 0
            pre_action = None
            obs, _ = env.reset()
            while True:
                action = common.choose_action(env, net=net, state=obs, device=device)
                if pre_action == action:
                    same_action_count += 1
                    if same_action_count > 100:
                        break
                else:
                    same_action_count = 0
                    pre_action = action
                obs, reward, done, trunc, _ = env.step(action)
                # env.render()
                done = done or trunc
                rewards += reward
                steps += 1
                if done:
                    break
        return rewards / count, steps / count



if __name__ == "__main__":
    # Entry point: parse CLI arguments, select the compute device, then build
    # the trainer, resume from the latest checkpoint if one exists, and train.
    cli_parser = common.build_parser()
    cli_args = cli_parser.parse_args()
    run_device = common.select_device(args=cli_args)

    dqn_trainer = Trainer(params=cli_args, device=run_device)
    dqn_trainer.load_model()
    dqn_trainer.train_model()

