#!/usr/bin/env python3
'''
正在重构，主体流程已经重构完毕，正在调试是否通顺，以及理解各个流程的代码，如何进行训练的
参考链接：
1. https://github.com/danijar/dreamer?tab=readme-ov-file（tensorflow2）
2. https://github.com/yusukeurakami/dreamer-pytorch（参考这个）
3. https://github.com/zhaoyi11/dreamer-pytorch?tab=readme-ov-file（基本和上面一致）
4. https://github.com/juliusfrost/dreamer-pytorch（过于复杂）


'''
import os
import sys
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
import yaml
import pathlib

from lib import model, common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

# Register the ALE (Atari) environments with gymnasium so gym.make can resolve them.
gym.register_envs(ale_py)


@torch.no_grad()
def test_net(actor_model, transition_model, encoder, belief_size, state_size, n_env, env, count=10, device="cpu", explore=False):
    '''
    Roll out `count` full episodes in `env` with the current policy and
    report averages.

    count: number of episodes to play (each one runs until termination)

    return: (mean episode reward, mean episode length)
    '''
    total_reward = 0.0
    total_steps = 0
    for _ in range(count):
        # Fresh zero-initialized latent state and a null action per episode.
        belief = torch.zeros(n_env, belief_size, device=device)
        posterior_state = torch.zeros(n_env, state_size, device=device)
        action = torch.zeros(n_env, env.action_space.shape[0], device=device)
        obs, _ = env.reset()
        done = truncated = False
        while not (done or truncated):
            obs_t = torch.tensor(obs, dtype=torch.float32, device=device)
            common.preprocess_observation_(obs_t, bit_depth=8)
            # Action and observation need an extra (time) dimension for the
            # transition model, which is removed again right after.
            belief, _, _, _, posterior_state, _, _ = transition_model(
                posterior_state, action.unsqueeze(dim=0), belief, encoder(obs_t).unsqueeze(dim=0)
            )
            belief = belief.squeeze(dim=0)
            posterior_state = posterior_state.squeeze(dim=0)
            # Deterministic action unless exploration is requested.
            action = actor_model.get_action(belief, posterior_state, det=not (explore))

            # Step the environment with the first env's action.
            obs, reward, done, truncated, _ = env.step(action[0].cpu().numpy())
            total_reward += reward
            total_steps += 1
    return total_reward / count, total_steps / count


class Trainer:
    """Dreamer-v1 trainer for CarRacing.

    Owns the train/test environments, the world model (transition, observation,
    reward, encoder), the actor and value networks plus their optimizers and
    schedulers, and the replay buffer. Drives the collect -> train -> test ->
    save loop via :meth:`train`.
    """

    def __init__(self, params, device):
        assert params['PRECISION'] in [16, 32], "Only 16 and 32 precision are supported"

        self.params = params
        self.device = device
        self.batch_size = self.params['BATCH_SIZE']
        self.chunk_size = self.params['CHUNK_SIZE']
        self.model_learning_rate = self.params['MODEL_LEARNING_RATE']
        self.learning_rate_schedule = self.params['LEARNING_RATE_SCHEDULE']
        self.overshooting_distance = self.params['OVERSHOOTING_DISTANCE']
        self.overshooting_reward_scale = self.params['OVERSHOOTING_REWARD_SCALE']
        self.overshooting_kl_beta = self.params['OVERSHOOTING_KL_BETA']
        self.state_size = self.params['STATE_SIZE']
        self.belief_size = self.params['BELIEF_SIZE']
        self.test_iters = self.params['TEST_ITERS']
        self.worldmodel_logprobloss = self.params['WORLDMODEL_LOGPROBLOSS']
        self.global_kl_beta = self.params['GLOBAL_KL_BETA']
        self.action_noise = self.params['ACTION_NOISE']
        self.reward_step = self.params['REWARD_STEP']
        self.replay_initial = self.params['REPLAY_INITIAL']
        self.grad_clip_norm = self.params['GRAD_CLIP_NORM']
        self.planning_horizon = self.params['PLANNING_HORIZON']
        self.disclam = self.params['DISCLAM']
        self.discount = self.params['DISCOUNT']
        torch.set_default_dtype(torch.float32 if self.params['PRECISION'] == 32 else torch.float16)

        self.save_path = os.path.join("saves", "dreamer-v1-" + self.params['name'])
        os.makedirs(self.save_path, exist_ok=True)
        self.save_path_buffer = os.path.join("saves", "dreamer-v1-" + self.params['name'] + "-buffer")
        os.makedirs(self.save_path_buffer, exist_ok=True)
        self.frame_idx = 0
        self.train_count = 0
        # Standard-normal prior used by the optional global KL regularizer.
        self.global_prior = torch.distributions.Normal(
            torch.zeros(self.params['BATCH_SIZE'], self.state_size, device=device),
            torch.ones(self.params['BATCH_SIZE'], self.state_size, device=device)
        )
        # "Free nats": KL below this threshold is not penalized.
        self.free_nats = torch.full((1,), self.params['FREE_NATS'], device=device)

        self.best_reward = None

        self.build_env()
        self.build_model()
        self.build_buffer()

    def build_buffer(self):
        """Create the tensorboard writer, the preheat agent, the experience source and the replay buffer."""
        self.writer = SummaryWriter(comment="-dreamer_" + self.params['name'])
        # Agent used only to pre-fill the buffer before the learned policy takes over.
        # NOTE(review): it is built on test_env while the experience source steps
        # self.env — confirm this mismatch is intended.
        self.preheat_agent = common.EnvSampleAgent(env=self.test_env, device=self.device)
        self.exp_source = ptan.experience.ExperienceSourceRAW(self.env, self.preheat_agent, steps_count=self.reward_step)
        self.buffer = ptan.experience.ExperienceReplayChunkBuffer(self.exp_source, buffer_size=self.params['REPLAY_SIZE'])

    def build_env(self):
        """Create the training and evaluation CarRacing environments."""
        # TODO: Dreamer-v1 can work without frame stacking — worth trying.
        self.env = common.wrap_dqn(gym.make('CarRacing-v3', render_mode="rgb_array", lap_complete_percent=0.95, domain_randomize=True, continuous=True))
        self.test_env = common.wrap_dqn(gym.make('CarRacing-v3', render_mode="rgb_array", lap_complete_percent=0.95, domain_randomize=True, continuous=True))

    def build_model(self):
        """Instantiate world-model components, actor/value networks, optimizers and LR schedulers."""
        # All modules live on self.device (previously this method relied on the
        # module-level `device` global, which broke reuse outside __main__).
        self.transition_model = model.Dreamer(belief_size=self.belief_size, state_size=self.state_size, act_size=self.env.action_space.shape[0], hidden_size=self.params['HIDE_SIZE'], embedding_size=self.params['EMBEDDING_SIZE']).to(device=self.device)
        self.observation_model = model.ConvDreamerObservation(belief_size=self.belief_size, state_size=self.state_size, embedding_size=self.params['EMBEDDING_SIZE']).to(device=self.device)
        self.reward_model = model.RewardDreamerModel(belief_size=self.belief_size, state_size=self.state_size, hidden_size=self.params['HIDE_SIZE']).to(device=self.device)
        self.encoder = model.ConvDreamerEncoder(obs_size=self.env.observation_space.shape, embedding_size=self.params['EMBEDDING_SIZE']).to(device=self.device)
        self.actor_model = model.DreamerActorModel(belief_size=self.belief_size, state_size=self.state_size, act_size=self.env.action_space.shape[0], hidden_size=self.params['HIDE_SIZE'], device=self.device).to(device=self.device)
        self.value_model = model.DreamerValueModel(belief_size=self.belief_size, state_size=self.state_size, hidden_size=self.params['HIDE_SIZE']).to(device=self.device)
        print(self.transition_model)
        print(self.observation_model)
        print(self.reward_model)
        print(self.encoder)
        print(self.actor_model)
        print(self.value_model)

        self.param_list = list(self.transition_model.parameters()) + list(self.observation_model.parameters()) + list(self.reward_model.parameters()) + list(self.encoder.parameters())
        self.value_actor_param_list = list(self.actor_model.parameters()) + list(self.value_model.parameters())
        self.all_params_list = self.param_list + self.value_actor_param_list

        # When LEARNING_RATE_SCHEDULE != 0 the optimizers start at lr=0 and the
        # learning rate is warmed up linearly inside __train_trainer up to the
        # configured target rate; otherwise the target rate is used directly.
        self.param_opt = optim.Adam(self.param_list, lr=0 if self.learning_rate_schedule != 0 else self.model_learning_rate, eps = self.params['ADAM_EPSILON'])
        self.param_opt_scheduler = optim.lr_scheduler.StepLR(self.param_opt, step_size=50000, gamma=0.9)
        self.param_act_opt = optim.Adam(self.actor_model.parameters(), lr = 0 if self.learning_rate_schedule != 0 else self.params['ACTOR_LEARNING_RATE'], eps=self.params['ADAM_EPSILON'])
        self.param_act_opt_scheduler = optim.lr_scheduler.StepLR(self.param_act_opt, step_size=50000, gamma=0.9)
        self.param_value_opt = optim.Adam(self.value_model.parameters(), lr = 0 if self.learning_rate_schedule != 0 else self.params['VALUE_LEARNING_RATE'], eps=self.params['ADAM_EPSILON'])
        self.param_value_opt_scheduler = optim.lr_scheduler.StepLR(self.param_value_opt, step_size=50000, gamma=0.9)

        # World-model modules that get frozen while training the actor/value heads.
        self.model_modules = [self.transition_model, self.encoder, self.observation_model, self.reward_model]

    def load_model(self):
        """Restore the newest checkpoint (models, optimizers, schedulers, counters) and replay buffer, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Checkpoints are named ..._epoch_<n>...; pick the highest epoch.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                self.transition_model.load_state_dict(checkpoint['transition_model'])
                self.observation_model.load_state_dict(checkpoint['observation_model'])
                self.reward_model.load_state_dict(checkpoint['reward_model'])
                self.encoder.load_state_dict(checkpoint['encoder'])
                self.actor_model.load_state_dict(checkpoint['actor_model'])
                self.value_model.load_state_dict(checkpoint['value_model'])
                self.param_opt.load_state_dict(checkpoint['param_opt'])
                self.param_act_opt.load_state_dict(checkpoint['param_act_opt'])
                self.param_value_opt.load_state_dict(checkpoint['param_value_opt'])
                self.param_opt_scheduler.load_state_dict(checkpoint['param_opt_scheduler'])
                self.param_act_opt_scheduler.load_state_dict(checkpoint['param_act_opt_scheduler'])
                self.param_value_opt_scheduler.load_state_dict(checkpoint['param_value_opt_scheduler'])
                self.frame_idx = checkpoint['frame_idx']
                self.train_count = checkpoint['train_count']

            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path_buffer)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                # Fix: the loaded buffer was previously assigned to a local and
                # discarded; restore it onto the trainer. weights_only=False is
                # required because the buffer is a pickled Python object.
                self.buffer = torch.load(os.path.join(self.save_path_buffer, checkpoints[-1]), weights_only=False)
                print("加载buffer成功")

            print("加载模型成功")

    def save_trainer(self):
        """Persist all model/optimizer/scheduler state plus progress counters, and the replay buffer."""
        checkpoints = {
            "transition_model": self.transition_model.state_dict(),
            "observation_model": self.observation_model.state_dict(),
            "reward_model": self.reward_model.state_dict(),
            "encoder": self.encoder.state_dict(),
            "actor_model": self.actor_model.state_dict(),
            "value_model": self.value_model.state_dict(),
            "param_act_opt": self.param_act_opt.state_dict(),
            "param_value_opt": self.param_value_opt.state_dict(),
            "param_opt": self.param_opt.state_dict(),
            "param_opt_scheduler": self.param_opt_scheduler.state_dict(),
            "param_act_opt_scheduler": self.param_act_opt_scheduler.state_dict(),
            "param_value_opt_scheduler": self.param_value_opt_scheduler.state_dict(),
            "frame_idx": self.frame_idx,
            "train_count": self.train_count,
        }

        common.save_checkpoints(self.train_count, checkpoints, self.save_path, 'dreamer_v1')
        common.save_checkpoints(self.train_count, self.buffer, self.save_path_buffer, 'dreamer_v1_buffer')

    def __collect_states(self):
        """Collect replay_initial transitions with the current (exploring) policy into the buffer."""
        with torch.no_grad():
            self.actor_model.eval()
            self.transition_model.eval()
            self.encoder.eval()
            update_beliefs_act_agent = common.UpdateBeliefActAgent(
                actor_model=self.actor_model,
                transition_model=self.transition_model,
                encoder=self.encoder,
                belief_size=self.belief_size,
                state_size=self.state_size,
                action_size=self.env.action_space.shape[0],
                explore=True,
                action_noise=self.action_noise,
                device=self.device)

            # Swap the buffer's experience source to the learned policy.
            exp_source = ptan.experience.ExperienceSourceRAW(self.env, update_beliefs_act_agent, steps_count=self.reward_step)
            self.buffer.set_exp_source(exp_source)
            for _ in tqdm(range(self.replay_initial)):
                self.buffer.populate(1)

            self.actor_model.train()
            self.transition_model.train()
            self.encoder.train()

    def collect_seed_episodes(self):
        """Pre-fill the replay buffer with preheat-agent experience before training starts."""
        if len(self.buffer) < self.replay_initial:
            print("模型预热")
            # Only fill the deficit so a buffer restored from disk is not
            # over-populated with an extra replay_initial samples.
            for _ in range(self.replay_initial - len(self.buffer)):
                self.buffer.populate(1)
            print("模型预热完成")

    def train(self):
        """Main loop: track episode rewards, run gradient updates, collect data, test and checkpoint."""
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
                while True:
                    self.frame_idx += 1
                    rewards_steps = self.exp_source.pop_rewards_steps()
                    if rewards_steps:
                        # Log training progress for finished episodes.
                        rewards, steps = zip(*rewards_steps)
                        tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                        tracker.reward(rewards[0], self.frame_idx)

                    self.__train_trainer()
                    self.__collect_states()

                    if self.train_count % self.test_iters == 0:
                        self.__test_trainer()

                    # NOTE(review): this checkpoints every loop iteration —
                    # confirm common.save_checkpoints throttles internally.
                    self.save_trainer()

    def __train_trainer(self):
        """Run TRAIN_COUNT gradient steps: world model first, then actor, then value head."""
        for _ in range(self.params['TRAIN_COUNT']):
            # --- assemble a (batch, chunk) mini-batch from the replay buffer ---
            observations = np.empty((self.batch_size, self.chunk_size) + self.env.observation_space.shape, dtype=np.uint8)
            actions = np.empty((self.batch_size, self.chunk_size, self.env.action_space.shape[0]), dtype=np.float32)
            rewards = np.empty((self.batch_size, self.chunk_size), dtype=np.float32)
            non_dones = np.empty((self.batch_size, self.chunk_size, 1), dtype=np.bool_)
            batch = self.buffer.sample(self.batch_size, self.chunk_size)
            for batch_idx in range(0, self.batch_size):
                cur_batch = batch[batch_idx]
                for step_idx in range(self.chunk_size):
                    observations[batch_idx][step_idx] = cur_batch[step_idx][0][0]
                    actions[batch_idx][step_idx] = cur_batch[step_idx][0][1]
                    rewards[batch_idx][step_idx] = cur_batch[step_idx][0][2]
                    non_dones[batch_idx][step_idx] = not cur_batch[step_idx][0][3]

            # Switch to time-major layout (chunk, batch, ...) expected by the RSSM.
            observations_n = observations.transpose(1, 0, 2, 3, 4).reshape((self.chunk_size, self.batch_size) + self.env.observation_space.shape)
            actions_n = actions.transpose(1, 0, 2).reshape(self.chunk_size, self.batch_size, -1)
            rewards_n = rewards.transpose(1, 0).reshape(self.chunk_size, self.batch_size)
            non_dones_n = non_dones.transpose(1, 0, 2).reshape(self.chunk_size, self.batch_size, 1)

            observations_v = torch.FloatTensor(observations_n).to(device=self.device)
            common.preprocess_observation_(observations_v, bit_depth=8)
            actions_v = torch.tensor(actions_n, device=self.device)
            rewards_v = torch.tensor(rewards_n, device=self.device)
            non_dones = torch.tensor(non_dones_n, device=self.device)

            init_belief, init_state = torch.zeros(self.batch_size, self.belief_size, device=self.device), torch.zeros(self.batch_size, self.state_size, device=self.device)

            # Roll the RSSM over the chunk to get prior/posterior latents.
            beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = self.transition_model(init_state, actions_v[:-1], init_belief, model.bottle(self.encoder, (observations_v[1:],)), non_dones[:-1])

            # Reconstruction loss (log-prob or MSE, summed over pixels).
            if self.worldmodel_logprobloss:
                observation_dist = torch.distributions.Normal(model.bottle( self.observation_model, (beliefs, posterior_states)), 1)
                observation_loss = (
                    -observation_dist.log_prob(observations_v[1:]).sum(dim=(2, 3, 4)).mean(dim=(0, 1))
                )
            else:
                observation_loss = (
                    F.mse_loss(model.bottle(self.observation_model, (beliefs, posterior_states)), observations_v[1:], reduction='none')
                    .sum(dim=(2, 3, 4))
                    .mean(dim=(0, 1))
                )

            # Reward-prediction loss.
            if  self.worldmodel_logprobloss:
                reward_dist = torch.distributions.Normal(model.bottle( self.reward_model, (beliefs, posterior_states)), 1)
                reward_loss = -reward_dist.log_prob(rewards_v[:-1]).mean(dim=(0, 1))
            else:
                reward_loss = F.mse_loss(model.bottle(self.reward_model, (beliefs, posterior_states)), rewards_v[:-1], reduction='none').mean(dim=(0, 1))

            # KL(prior || posterior) with free nats.
            div = torch.distributions.kl.kl_divergence(torch.distributions.Normal(prior_means, prior_std_devs), torch.distributions.Normal(posterior_means, posterior_std_devs)).sum(dim=2)
            kl_loss = torch.max(div, self.free_nats).mean(dim=(0, 1))

            if  self.global_kl_beta != 0:
                kl_loss =  self.global_kl_beta * torch.distributions.kl.kl_divergence(torch.distributions.Normal(posterior_means, posterior_std_devs), self.global_prior).sum(dim=2).mean(dim=(0, 1))

            # Capture the posterior latents for the actor/value phase NOW:
            # the overshooting branch below overwrites `beliefs`/`prior_states`
            # with its own rollouts (the original code detached them after the
            # branch and would have trained the actor on overshooting latents).
            actor_states = posterior_states.detach()
            actor_beliefs = beliefs.detach()

            # Optional latent-overshooting regularization (PlaNet-style).
            if self.overshooting_kl_beta != 0:
                overshooting_vars = []
                for t in range(1,  self.chunk_size - 1):
                    d = min(t + self.overshooting_distance, self.chunk_size - 1)
                    t_, d_ = t - 1, d - 1
                    # Pad sequences shorter than the overshooting distance.
                    seq_pad = (
                        0, 0, 0, 0, 0, t - d + self.overshooting_distance
                    )

                    overshooting_vars.append(
                        (
                            F.pad(actions_v[t:d], seq_pad),
                            F.pad(non_dones[t:d], seq_pad),
                            F.pad(rewards_v[t:d], seq_pad[2:]),
                            beliefs[t_],
                            prior_states[t_],
                            F.pad(posterior_means[t_ + 1: d_ + 1].detach(), seq_pad),
                            F.pad(posterior_std_devs[t_ + 1: d_ + 1].detach(), seq_pad, value=1),
                            F.pad(torch.ones(d - t, self.batch_size, self.state_size, device=self.device), seq_pad),
                        )
                    )
                overshooting_vars = tuple(zip(*overshooting_vars))

                # Prior-only rollout from each starting latent.
                beliefs, prior_states, prior_means, prior_std_devs = self.transition_model(
                    torch.cat(overshooting_vars[4], dim=0),
                    torch.cat(overshooting_vars[0], dim=1),
                    torch.cat(overshooting_vars[3], dim=0),
                    None,
                    torch.cat(overshooting_vars[1], dim=1),
                )

                seq_mask = torch.cat(overshooting_vars[7], dim=1)
                # Fix: was a bare `free_nats` (NameError inside this method).
                kl_loss += (
                    (1 / self.overshooting_distance) * self.overshooting_kl_beta * torch.max((torch.distributions.kl.kl_divergence(torch.distributions.Normal(torch.cat(overshooting_vars[5], dim=1), torch.cat(overshooting_vars[6], dim=1)), torch.distributions.Normal(prior_means, prior_std_devs),) * seq_mask).sum(dim=2), self.free_nats,).mean(dim=(0, 1)) * (self.chunk_size - 1)
                )

                if self.overshooting_reward_scale != 0:
                    reward_loss += (
                        (1 / self.overshooting_distance) * self.overshooting_reward_scale * F.mse_loss(model.bottle(self.reward_model, (beliefs, prior_states)) * seq_mask[:, :, 0], torch.cat(overshooting_vars[2], dim=1), reduction='none').mean(dim=(0, 1)) * (self.chunk_size - 1)
                    )

            # Linear learning-rate warm-up toward the target model rate.
            if self.learning_rate_schedule != 0:
                for group in self.param_opt.param_groups:
                    group['lr'] = min(
                        group['lr'] + (self.model_learning_rate / self.learning_rate_schedule), self.model_learning_rate
                    )

            # --- world-model update ---
            model_loss = observation_loss + reward_loss + kl_loss
            self.param_opt.zero_grad()
            model_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.param_list, self.grad_clip_norm, norm_type=2)
            self.param_opt.step()

            # --- actor update on imagined trajectories (world model frozen) ---
            with common.FreezeParameters(self.model_modules):
                imagination_traj = common.imagine_ahead(
                    actor_states, actor_beliefs, self.actor_model, self.transition_model, self.planning_horizon
                )

            imaged_beliefs, imaged_prior_states, imged_prior_means, imged_prior_std_devs = imagination_traj
            with common.FreezeParameters(self.model_modules + [self.value_model]):
                imged_reward = model.bottle(self.reward_model, (imaged_beliefs, imaged_prior_states))
                value_pred = model.bottle(self.value_model, (imaged_beliefs, imaged_prior_states))

            # GAE-style lambda-returns over the imagined rollout.
            returns = common.lambda_return(
                imged_reward, value_pred, bootstrap=value_pred[-1], discount=self.discount, lambda_=self.disclam
            )

            actor_loss = -torch.mean(returns)
            self.param_act_opt.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor_model.parameters(), self.grad_clip_norm, norm_type=2)
            self.param_act_opt.step()

            # --- value update toward the detached lambda-returns ---
            with torch.no_grad():
                value_beliefs = imaged_beliefs.detach()
                value_prior_states = imaged_prior_states.detach()
                target_return = returns.detach()
            value_dist = torch.distributions.Normal(
                model.bottle(self.value_model, (value_beliefs, value_prior_states)), 1
            )

            value_loss = -value_dist.log_prob(target_return).mean(dim=(0, 1))
            self.param_value_opt.zero_grad()
            value_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.value_model.parameters(), self.grad_clip_norm, norm_type=2)
            self.param_value_opt.step()
        # Fix: was a bare `train_count += 1` (UnboundLocalError at runtime).
        self.train_count += 1

    def __test_trainer(self):
        """Evaluate the current policy on test_env, log the results and persist the best nets."""
        ts = time.time()
        self.actor_model.eval()
        self.transition_model.eval()
        self.encoder.eval()
        rewards, steps = test_net(self.actor_model, self.transition_model, self.encoder, belief_size=self.belief_size, state_size=self.state_size, n_env=1, env=self.test_env, count=10, device=self.device)
        self.actor_model.train()
        self.transition_model.train()
        self.encoder.train()
        print("Test done in %.2f sec, reward %.3f, steps %d" % (
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.frame_idx)
        self.writer.add_scalar("test_steps", steps, self.frame_idx)
        # Fix: was a bare `best_reward` (NameError); also dropped the dead
        # name/fname computation — persistence is handled by save_best_model.
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards

        checkpoints = {
                    "actor_model": self.actor_model.state_dict(),
                    "transition_model": self.transition_model.state_dict(),
                    "encoder": self.encoder.state_dict()
                }
        common.save_best_model(rewards, checkpoints, self.save_path, 'dreamer_v1_best')

        


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Fix: `action='store_true'` with default=True could never be turned off;
    # BooleanOptionalAction keeps "--cuda" working and adds "--no-cuda".
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction, help='Enable CUDA')
    parser.add_argument("-n", "--name", default="carracing", help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load defaults from the YAML config next to this script, then expose every
    # config key as a CLI flag so the command line can override it.
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/carracing_configs.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)

    trainer = Trainer(params, device)
    trainer.load_model()
    trainer.collect_seed_episodes()
    trainer.train()
