#!/usr/bin/env python3
'''
完成适配
参考链接：
1. https://github.com/danijar/dreamer?tab=readme-ov-file（tensorflow2）
2. https://github.com/yusukeurakami/dreamer-pytorch（参考这个）
3. https://github.com/zhaoyi11/dreamer-pytorch?tab=readme-ov-file（基本和上面一致）
4. https://github.com/juliusfrost/dreamer-pytorch（过于复杂）


训练记录：
在2号机上训练
20250317: 测试分数37.19，训练分数 -45.592，训练比较慢，感觉是上升的
20250318: 继续训练，学习率未变化，测试分数399.85，训练分数323.628，训练比较慢，感觉是上升的
20250319: 继续训练，学习率未变化，加载buffer成功，加载模型成功，测试分数431.67，训练分数298.977，继续训练
20250320: 继续训练，学习率未变化，测试分数461.83，训练分数407.757，继续训练
20250321: 继续训练，学习率未变化，测试分数680.36，训练分数486.762，继续训练，看样子是可以训练的，但是训练速度比较慢
20250322: 暂停训练一天
20250323: 暂停训练一天
20250324: 继续训练，学习率未变化，测试分数796.644650398933，已经达到了之前a2c方法的极限了，继续训练一天
20250325：继续训练，学习率未变化，加载buffer成功，加载模型成功
20250326:因为天气炎热，没法同时训练，先暂停
20250326：继续暂停训练一天，明天训练一天
20250327:继续训练，学习率未变化，测试分数796.64，训练分数 708.164，继续训练
20250328：继续训练，学习率未变化，测试分数812.2785513197005，训练分数804.314，继续训练，已超越了ddpg
20250329:继续训练，学习率未变化，测试分数828.79，训练分数793.122，停止训练，play模型，重构代码，模拟planet
'''
import os
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm

from lib import model, common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

gym.register_envs(ale_py)

ENV_ID = "ALE/IceHockey-v5"  # NOTE(review): unused below — the script builds CarRacing-v3 envs directly
GAMMA = 0.99  # NOTE(review): unused below — DISCOUNT is what the return computation uses
BATCH_SIZE = 64  # number of sequence chunks per training batch
MODEL_LEARNING_RATE = 1e-3   # LR for the world model (transition/observation/reward/encoder)
ACTOR_LEARNING_RATE = 8e-5   # LR for the actor network
VALUE_LEARNING_RATE = 8e-5   # LR for the value network
ADAM_EPSILON = 1e-7          # Adam epsilon for all three optimizers
LEARNING_RATE_SCHEDULE = 0   # >0 enables a linear LR warm-up over this many updates (model optimizer only)
REPLAY_SIZE = 100000 # replay buffer capacity; kept large for training stability
REPLAY_INITIAL = 10000 # frames collected to warm up the buffer (and re-collected each outer iteration)

TEST_ITERS = 1  # evaluate every TEST_ITERS outer-loop iterations

PRECISION = 32       # default torch dtype: 32 -> float32, 16 -> float16
STATE_SIZE = 30      # size of the stochastic latent state
HIDE_SIZE = 200      # hidden width of the dense sub-networks
BELIEF_SIZE = 200    # size of the deterministic (recurrent) belief state
REWARD_STEP = 1      # step count passed to the ptan experience source
EMBEDDING_SIZE = 1024  # size of the encoder's observation embedding
FREE_NATS = float(3.0)  # KL is clamped from below by this many nats
WORLDMODEL_LOGPROBLOSS = True  # True: Gaussian log-prob losses; False: MSE losses
GLOBAL_KL_BETA = float(0)      # weight of the KL towards the global N(0, 1) prior; 0 disables it
OVERSHOOTING_KL_BETA = float(0)   # latent-overshooting KL weight; 0 disables overshooting
OVERSHOOTING_DISTANCE = 50        # maximum overshooting horizon (in steps)
CHUNK_SIZE = 50                   # length of each sequence chunk sampled from the buffer
OVERSHOOTING_REWARD_SCALE = float(0)  # overshooting reward-loss weight; 0 disables it
PLANNING_HORIZON = 15  # imagination rollout length for actor/value learning
GRAD_CLIP_NORM = 100   # max L2 gradient norm for all optimizers
TRAIN_COUNT = 100      # gradient updates per outer-loop iteration
DISCOUNT = 0.99        # discount factor used by the lambda-return
DISCLAM = 0.95         # lambda parameter of the lambda-return
ACTION_NOISE = 0.3     # exploration noise used during data collection


@torch.no_grad()
def test_net(actor_model, transition_model, encoder, belief_size, state_size, n_env, env, count=10, device="cpu", explore=False):
    '''
    Roll out the current policy for `count` full episodes and report averages.

    For every step the recurrent state-space model is advanced with the last
    action and the encoded observation to keep the belief/posterior state up
    to date, then the actor is queried for the next action.

    count: number of episodes to play (each runs until termination/truncation)

    return: (mean episode reward, mean episode length)
    '''
    total_reward = 0.0
    total_steps = 0
    for _ in range(count):
        # Fresh latent state at the start of every episode.
        belief = torch.zeros(n_env, belief_size, device=device)
        posterior_state = torch.zeros(n_env, state_size, device=device)
        action = torch.zeros(n_env, env.action_space.shape[0], device=device)
        obs, _ = env.reset()
        episode_over = False
        while not episode_over:
            obs = torch.tensor(obs, dtype=torch.float32, device=device)
            common.preprocess_observation_(obs, bit_depth=8)
            # The transition model expects an extra leading time dimension on
            # the action and the encoded observation.
            outputs = transition_model(
                posterior_state, action.unsqueeze(dim=0), belief, encoder(obs).unsqueeze(dim=0)
            )
            # outputs[0] is the belief, outputs[4] the posterior state; strip
            # the time dimension again before acting.
            belief = outputs[0].squeeze(dim=0)
            posterior_state = outputs[4].squeeze(dim=0)
            # Deterministic action unless exploration noise was requested.
            action = actor_model.get_action(belief, posterior_state, det=not explore)

            # Execute the action in the (single) environment.
            obs, reward, done, truncated, _ = env.step(action[0].cpu().numpy())
            total_reward += reward
            total_steps += 1
            episode_over = done or truncated
    return total_reward / count, total_steps / count



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # BooleanOptionalAction keeps the old default (CUDA enabled) while also
    # generating a --no-cuda switch. The previous combination of
    # `default=True` with `action='store_true'` made the flag a no-op:
    # args.cuda was True whether or not --cuda was passed.
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction,
                        help='Enable CUDA (use --no-cuda to disable)')
    parser.add_argument("-n", "--name", default="carracing", help="Name of the run")
    args = parser.parse_args()
    device = common.select_device(args=args)

    assert PRECISION in [16, 32], "Only 16 and 32 precision are supported"
    torch.set_default_dtype(torch.float32 if PRECISION == 32 else torch.float16)

    # One directory for model checkpoints, one for the serialized replay buffer.
    save_path = os.path.join("saves", "dreamer-v1-" + args.name)
    os.makedirs(save_path, exist_ok=True)
    save_path_buffer = os.path.join("saves", "dreamer-v1-" + args.name + "-buffer")
    os.makedirs(save_path_buffer, exist_ok=True)

    # TODO: DreamerV1 may not need frame stacking — worth trying without it.
    env = common.wrap_dqn(gym.make('CarRacing-v3', render_mode="rgb_array", lap_complete_percent=0.95, domain_randomize=True, continuous=True))
    test_env = common.wrap_dqn(gym.make('CarRacing-v3', render_mode="rgb_array", lap_complete_percent=0.95, domain_randomize=True, continuous=True))

    # Build the world model (transition, observation decoder, reward head,
    # encoder) and the behavior networks (actor, value).
    transition_model = model.Dreamer(belief_size=BELIEF_SIZE, state_size=STATE_SIZE, act_size=env.action_space.shape[0], hidden_size=HIDE_SIZE, embedding_size=EMBEDDING_SIZE).to(device=device)
    observation_model = model.ConvDreamerObservation(belief_size=BELIEF_SIZE, state_size=STATE_SIZE, embedding_size=EMBEDDING_SIZE).to(device=device)
    reward_model = model.RewardDreamerModel(belief_size=BELIEF_SIZE, state_size=STATE_SIZE, hidden_size=HIDE_SIZE).to(device=device)
    encoder = model.ConvDreamerEncoder(obs_size=env.observation_space.shape, embedding_size=EMBEDDING_SIZE).to(device=device)
    actor_model = model.DreamerActorModel(belief_size=BELIEF_SIZE, state_size=STATE_SIZE, act_size=env.action_space.shape[0], hidden_size=HIDE_SIZE, device=device).to(device=device)
    value_model = model.DreamerValueModel(belief_size=BELIEF_SIZE, state_size=STATE_SIZE, hidden_size=HIDE_SIZE).to(device=device)
    print(transition_model)
    print(observation_model)
    print(reward_model)
    print(encoder)
    print(actor_model)
    print(value_model)

    # World-model parameters share one optimizer; actor and value each get their own.
    param_list = list(transition_model.parameters()) + list(observation_model.parameters()) + list(reward_model.parameters()) + list(encoder.parameters())
    value_actor_param_list = list(actor_model.parameters()) + list(value_model.parameters())
    all_params_list = param_list + value_actor_param_list

    # When LEARNING_RATE_SCHEDULE is enabled, each optimizer starts at lr=0
    # and is warmed up inside the training loop.
    param_opt = optim.Adam(param_list, lr=0 if LEARNING_RATE_SCHEDULE != 0 else MODEL_LEARNING_RATE, eps = ADAM_EPSILON)
    param_opt_scheduler = optim.lr_scheduler.StepLR(param_opt, step_size=50000, gamma=0.9)
    param_act_opt = optim.Adam(actor_model.parameters(), lr = 0 if LEARNING_RATE_SCHEDULE != 0 else ACTOR_LEARNING_RATE, eps=ADAM_EPSILON)
    param_act_opt_scheduler = optim.lr_scheduler.StepLR(param_act_opt, step_size=50000, gamma=0.9)
    param_value_opt = optim.Adam(value_model.parameters(), lr = 0 if LEARNING_RATE_SCHEDULE != 0 else VALUE_LEARNING_RATE, eps=ADAM_EPSILON)
    param_value_opt_scheduler = optim.lr_scheduler.StepLR(param_value_opt, step_size=50000, gamma=0.9)


    writer = SummaryWriter(comment="-dreamer_" + args.name)
    # Warm-up agent that samples actions directly from the environment's
    # action space; it is only used to prefill the replay buffer.
    preheat_agent = common.EnvSampleAgent(env=test_env, device=device)
    exp_source = ptan.experience.ExperienceSourceRAW(env, preheat_agent, steps_count=REWARD_STEP)
    buffer = ptan.experience.ExperienceReplayChunkBuffer(exp_source, buffer_size=REPLAY_SIZE)

    frame_idx = 0
    train_count = 0
    # Resume training from the newest checkpoint if one exists; otherwise
    # prefill the replay buffer with random experience.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Checkpoints are named ..._<epoch>.<ext>; sort by the trailing
        # epoch number and load the most recent one.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            transition_model.load_state_dict(checkpoint['transition_model'])
            observation_model.load_state_dict(checkpoint['observation_model'])
            reward_model.load_state_dict(checkpoint['reward_model'])
            encoder.load_state_dict(checkpoint['encoder'])
            actor_model.load_state_dict(checkpoint['actor_model'])
            value_model.load_state_dict(checkpoint['value_model'])
            param_opt.load_state_dict(checkpoint['param_opt'])
            param_act_opt.load_state_dict(checkpoint['param_act_opt'])
            param_value_opt.load_state_dict(checkpoint['param_value_opt'])
            param_opt_scheduler.load_state_dict(checkpoint['param_opt_scheduler'])
            param_act_opt_scheduler.load_state_dict(checkpoint['param_act_opt_scheduler'])
            param_value_opt_scheduler.load_state_dict(checkpoint['param_value_opt_scheduler'])
            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']

        # Restore the pickled replay buffer the same way.
        # NOTE(review): loaded without map_location/weights_only — assumes the
        # buffer was saved on a compatible device and is a trusted pickle.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path_buffer)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        if len(checkpoints) > 0:
            buffer = torch.load(os.path.join(save_path_buffer, checkpoints[-1]))
            print("加载buffer成功")

        print("加载模型成功")
    else:
        # Cold start: fill the buffer with REPLAY_INITIAL random-agent frames.
        print("模型预热")
        for _ in range(REPLAY_INITIAL):
            buffer.populate(1)
        print("模型预热完成")

    # Standard-normal prior used by the optional global KL regularizer.
    global_prior = torch.distributions.Normal(
        torch.zeros(BATCH_SIZE, STATE_SIZE, device=device),
        torch.ones(BATCH_SIZE, STATE_SIZE, device=device)
    )
    # Lower clamp for the KL term ("free nats").
    free_nats = torch.full((1,), FREE_NATS, device=device)
    best_reward = None
    # Modules whose gradients are frozen during actor/value learning.
    model_modules = [transition_model, encoder, observation_model, reward_model]
    with ptan.common.utils.RewardTracker(writer) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=100) as tb_tracker:
            while True:
                frame_idx += 1
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    # Log finished-episode reward and length to TensorBoard.
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", steps[0], frame_idx)
                    tracker.reward(rewards[0], frame_idx)


                for _ in range(TRAIN_COUNT):
                    # Buffers in (batch, time, ...) layout for the sampled chunks.
                    observations = np.empty((BATCH_SIZE, CHUNK_SIZE) + env.observation_space.shape, dtype=np.uint8)
                    actions = np.empty((BATCH_SIZE, CHUNK_SIZE, env.action_space.shape[0]), dtype=np.float32)
                    rewards = np.empty((BATCH_SIZE, CHUNK_SIZE), dtype=np.float32)
                    non_dones = np.empty((BATCH_SIZE, CHUNK_SIZE, 1), dtype=np.bool_)
                    # Sample BATCH_SIZE sequence chunks of CHUNK_SIZE steps from the buffer.
                    batch = buffer.sample(BATCH_SIZE, CHUNK_SIZE)
                    for batch_idx in range(0, BATCH_SIZE):
                        cur_batch = batch[batch_idx]
                        for step_idx in range(CHUNK_SIZE):
                            # Each step appears to be ((obs, action, reward, done), ...);
                            # the done flag is inverted into a "non-done" mask.
                            observations[batch_idx][step_idx] = cur_batch[step_idx][0][0]
                            actions[batch_idx][step_idx] = cur_batch[step_idx][0][1]
                            rewards[batch_idx][step_idx] = cur_batch[step_idx][0][2]
                            non_dones[batch_idx][step_idx] = not cur_batch[step_idx][0][3]

                    # Transpose everything to (time, batch, ...) — the layout the
                    # recurrent transition model consumes.
                    observations_n = np.array(observations).transpose(1, 0, 2, 3, 4).reshape((CHUNK_SIZE, BATCH_SIZE) + env.observation_space.shape)
                    actions_n = np.array(actions).transpose(1, 0, 2).reshape(CHUNK_SIZE, BATCH_SIZE, -1)
                    rewards_n = np.array(rewards).transpose(1, 0).reshape(CHUNK_SIZE, BATCH_SIZE)
                    non_dones_n = np.array(non_dones).transpose(1, 0, 2).reshape(CHUNK_SIZE, BATCH_SIZE, 1)

                    observations_v = torch.FloatTensor(observations_n).to(device=device)
                    common.preprocess_observation_(observations_v, bit_depth=8)
                    actions_v = torch.tensor(actions_n, device=device)
                    rewards_v = torch.tensor(rewards_n, device=device)
                    non_dones = torch.tensor(non_dones_n, device=device)


                    # Each chunk starts from a zero belief/state.
                    init_belief, init_state = torch.zeros(BATCH_SIZE, BELIEF_SIZE, device=device), torch.zeros(BATCH_SIZE, STATE_SIZE, device=device)

                    # Unroll the RSSM over the chunk: actions are shifted by one step
                    # relative to the observations they lead to.
                    beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = transition_model(init_state, actions_v[:-1], init_belief, model.bottle(encoder, (observations_v[1:],)), non_dones[:-1])
                    # Observation reconstruction loss: Gaussian log-likelihood
                    # (unit variance) or plain MSE, summed over image dims.
                    if WORLDMODEL_LOGPROBLOSS:
                        observation_dist = torch.distributions.Normal(model.bottle(observation_model, (beliefs, posterior_states)), 1)
                        observation_loss = (
                            -observation_dist.log_prob(observations_v[1:]).sum(dim=(2, 3, 4)).mean(dim=(0, 1))
                        )
                    else:
                        observation_loss = (
                            F.mse_loss(model.bottle(observation_model, (beliefs, posterior_states)), observations_v[1:], reduction='none')
                            .sum(dim=(2, 3, 4))
                            .mean(dim=(0, 1))
                        )

                    # Reward prediction loss, same log-prob/MSE switch as above.
                    if WORLDMODEL_LOGPROBLOSS:
                        reward_dist = torch.distributions.Normal(model.bottle(reward_model, (beliefs, posterior_states)), 1)
                        reward_loss = -reward_dist.log_prob(rewards_v[:-1]).mean(dim=(0, 1))
                    else:
                        reward_loss = F.mse_loss(model.bottle(reward_model, (beliefs, posterior_states)), rewards_v[:-1], reduction='none').mean(dim=(0, 1))
                    
                    # KL between the filtering posterior q(s_t | ...) and the one-step
                    # prior p(s_t | s_{t-1}, a_{t-1}). Dreamer/PlaNet minimize
                    # KL[posterior || prior]; the original code had the arguments
                    # swapped, which computes KL in the wrong direction.
                    div = torch.distributions.kl.kl_divergence(
                        torch.distributions.Normal(posterior_means, posterior_std_devs),
                        torch.distributions.Normal(prior_means, prior_std_devs),
                    ).sum(dim=2)
                    # Free-nats clamp: no gradient while the KL is below FREE_NATS.
                    kl_loss = torch.max(div, free_nats).mean(dim=(0, 1))

                    if GLOBAL_KL_BETA != 0:
                        # Add the regularizer towards the global N(0, 1) prior on top of
                        # the free-nats KL; the original `=` silently discarded it.
                        kl_loss += GLOBAL_KL_BETA * torch.distributions.kl.kl_divergence(
                            torch.distributions.Normal(posterior_means, posterior_std_devs),
                            global_prior,
                        ).sum(dim=2).mean(dim=(0, 1))

                    # Latent overshooting (PlaNet): train multi-step priors to match
                    # the one-step posteriors. Disabled here (OVERSHOOTING_KL_BETA=0).
                    # NOTE(review): when enabled, this re-binds `beliefs`/`prior_states`
                    # to the overshooting rollout, which the actor stage below then
                    # detaches — confirm that is intended.
                    if OVERSHOOTING_KL_BETA != 0:
                        overshooting_vars = []
                        for t in range(1, CHUNK_SIZE - 1):
                            # Overshoot from step t up to at most OVERSHOOTING_DISTANCE
                            # steps ahead, clipped at the end of the chunk.
                            d = min(t + OVERSHOOTING_DISTANCE, CHUNK_SIZE - 1)
                            t_, d_ = t - 1, d - 1
                            # Zero-pad every sequence to a fixed length of
                            # OVERSHOOTING_DISTANCE so they can be batched together.
                            seq_pad = (
                                0, 0, 0, 0, 0, t - d + OVERSHOOTING_DISTANCE
                            )

                            overshooting_vars.append(
                                (
                                    F.pad(actions_v[t:d], seq_pad),
                                    F.pad(non_dones[t:d], seq_pad),
                                    F.pad(rewards_v[t:d], seq_pad[2:]),
                                    beliefs[t_],
                                    prior_states[t_],
                                    # Posterior targets are detached: only the prior learns.
                                    F.pad(posterior_means[t_ + 1: d_ + 1].detach(), seq_pad),
                                    F.pad(posterior_std_devs[t_ + 1: d_ + 1].detach(), seq_pad, value=1),
                                    # Mask distinguishing real steps from padding.
                                    F.pad(torch.ones(d - t, BATCH_SIZE, STATE_SIZE, device=device), seq_pad),
                                )
                            )
                        overshooting_vars = tuple(zip(*overshooting_vars))

                        # Roll the transition model forward without observations
                        # (pure prior) from every saved starting state.
                        beliefs, prior_states, prior_means, prior_std_devs = transition_model(
                            torch.cat(overshooting_vars[4], dim=0),
                            torch.cat(overshooting_vars[0], dim=1),
                            torch.cat(overshooting_vars[3], dim=0),
                            None,
                            torch.cat(overshooting_vars[1], dim=1),
                        )

                        seq_mask = torch.cat(overshooting_vars[7], dim=1)
                        # Masked, free-nats-clamped KL between multi-step priors and the
                        # detached posterior targets, averaged over the overshoot distance.
                        kl_loss += (
                            (1 / OVERSHOOTING_DISTANCE) * OVERSHOOTING_KL_BETA * torch.max((torch.distributions.kl.kl_divergence(torch.distributions.Normal(torch.cat(overshooting_vars[5], dim=1), torch.cat(overshooting_vars[6], dim=1)), torch.distributions.Normal(prior_means, prior_std_devs),) * seq_mask).sum(dim=2), free_nats,).mean(dim=(0, 1)) * (CHUNK_SIZE - 1)
                        )

                        # Optional: also train the reward head on overshooting states.
                        if OVERSHOOTING_REWARD_SCALE != 0:
                            reward_loss += (
                                (1 / OVERSHOOTING_DISTANCE) * OVERSHOOTING_REWARD_SCALE * F.mse_loss(model.bottle(reward_model, (beliefs, prior_states)) * seq_mask[:, :, 0], torch.cat(overshooting_vars[2], dim=1), reduction='none').mean(dim=(0, 1)) * (CHUNK_SIZE - 1)
                            )

                    # Optional linear LR warm-up for the world-model optimizer.
                    if LEARNING_RATE_SCHEDULE != 0:
                        for group in param_opt.param_groups:
                            group['lr'] = min(
                                group['lr'] + (MODEL_LEARNING_RATE / LEARNING_RATE_SCHEDULE), MODEL_LEARNING_RATE
                            )

                    # --- World-model update: reconstruction + reward + KL losses ---
                    model_loss = observation_loss + reward_loss + kl_loss
                    param_opt.zero_grad()
                    model_loss.backward()
                    torch.nn.utils.clip_grad_norm_(param_list, GRAD_CLIP_NORM, norm_type=2)
                    param_opt.step()

                    # --- Actor update: maximize lambda-returns over imagined rollouts ---
                    # Detach so actor gradients do not flow back into the world model.
                    with torch.no_grad():
                        actor_states = posterior_states.detach()
                        actor_beliefs = beliefs.detach()

                    # Freeze world-model parameters while imagining ahead.
                    with common.FreezeParameters(model_modules):
                        imagination_traj = common.imagine_ahead(
                            actor_states, actor_beliefs, actor_model, transition_model, PLANNING_HORIZON
                        )

                    imaged_beliefs, imaged_prior_states, imged_prior_means, imged_prior_std_devs = imagination_traj
                    # Value model is frozen too: only the actor receives gradients here.
                    with common.FreezeParameters(model_modules + [value_model]):
                        imged_reward = model.bottle(reward_model, (imaged_beliefs, imaged_prior_states))
                        value_pred = model.bottle(value_model, (imaged_beliefs, imaged_prior_states))

                    # TD(lambda)-style returns bootstrapped from the last value estimate.
                    returns = common.lambda_return(
                        imged_reward, value_pred, bootstrap=value_pred[-1], discount=DISCOUNT, lambda_=DISCLAM
                    )

                    actor_loss = -torch.mean(returns)
                    param_act_opt.zero_grad()
                    actor_loss.backward()
                    torch.nn.utils.clip_grad_norm_(actor_model.parameters(), GRAD_CLIP_NORM, norm_type=2)
                    param_act_opt.step()

                    # --- Value update: regress onto the (detached) lambda-returns ---
                    with torch.no_grad():
                        value_beliefs = imaged_beliefs.detach()
                        value_prior_states = imaged_prior_states.detach()
                        target_return = returns.detach()
                    # Unit-variance Gaussian likelihood of the return targets.
                    value_dist = torch.distributions.Normal(
                        model.bottle(value_model, (value_beliefs, value_prior_states)), 1
                    )

                    value_loss = -value_dist.log_prob(target_return).mean(dim=(0, 1))
                    param_value_opt.zero_grad()
                    value_loss.backward()
                    torch.nn.utils.clip_grad_norm_(value_model.parameters(), GRAD_CLIP_NORM, norm_type=2)
                    param_value_opt.step()
                
                train_count += 1
                # Collect fresh experience with the current (noisy) policy.
                # NOTE(review): a full REPLAY_INITIAL (10k) frames are gathered after
                # every TRAIN_COUNT updates, and the agent/exp_source are rebuilt each
                # iteration (the episode logging above reads the newest exp_source) —
                # confirm this data/update ratio is intended.
                with torch.no_grad():
                    actor_model.eval()
                    transition_model.eval()
                    encoder.eval()
                    update_beliefs_act_agent = common.UpdateBeliefActAgent(
                        actor_model=actor_model,
                        transition_model=transition_model,
                        encoder=encoder,
                        belief_size=BELIEF_SIZE,
                        state_size=STATE_SIZE,
                        action_size=env.action_space.shape[0],
                        explore=True,
                        action_noise=ACTION_NOISE,
                        device=device)

                    exp_source = ptan.experience.ExperienceSourceRAW(env, update_beliefs_act_agent, steps_count=REWARD_STEP)
                    buffer.set_exp_source(exp_source)
                    pbar = tqdm(range(REPLAY_INITIAL))
                    for t in pbar:
                        buffer.populate(1)

                    # Back to training mode for the next optimization phase.
                    actor_model.train()
                    transition_model.train()
                    encoder.train()




                if train_count % TEST_ITERS == 0:
                    # Evaluate the current policy and persist the best-scoring weights.
                    ts = time.time()
                    actor_model.eval()
                    transition_model.eval()
                    encoder.eval()
                    rewards, steps = test_net(actor_model, transition_model, encoder, belief_size=BELIEF_SIZE, state_size=STATE_SIZE, n_env=1, env=test_env, count=10, device=device)
                    actor_model.train()
                    transition_model.train()
                    encoder.train()
                    print("Test done in %.2f sec, reward %.3f, steps %d" % (
                        time.time() - ts, rewards, steps))
                    writer.add_scalar("test_reward", rewards, frame_idx)
                    writer.add_scalar("test_steps", steps, frame_idx)
                    if best_reward is None or best_reward < rewards:
                        if best_reward is not None:
                            print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                        best_reward = rewards

                    # save_best_model handles the on-disk "best" checkpoint; the old
                    # dead `name`/`fname` computation (never used to write a file)
                    # was removed.
                    checkpoints = {
                        "actor_model": actor_model.state_dict(),
                        "transition_model": transition_model.state_dict(),
                        "encoder": encoder.state_dict()
                    }
                    common.save_best_model(rewards, checkpoints, save_path, 'dreamer_v1_best')

                # Full training checkpoint every iteration: all networks, all three
                # optimizers and schedulers, plus the progress counters needed to
                # resume exactly where training left off.
                checkpoints = {
                    "transition_model": transition_model.state_dict(),
                    "observation_model": observation_model.state_dict(),
                    "reward_model": reward_model.state_dict(),
                    "encoder": encoder.state_dict(),
                    "actor_model": actor_model.state_dict(),
                    "value_model": value_model.state_dict(),
                    "param_act_opt": param_act_opt.state_dict(),
                    "param_value_opt": param_value_opt.state_dict(),
                    "param_opt": param_opt.state_dict(),
                    "param_opt_scheduler": param_opt_scheduler.state_dict(),
                    "param_act_opt_scheduler": param_act_opt_scheduler.state_dict(),
                    "param_value_opt_scheduler": param_value_opt_scheduler.state_dict(),
                    "frame_idx": frame_idx,
                    "train_count": train_count,
                }

                common.save_checkpoints(train_count, checkpoints, save_path, 'dreamer_v1')
                # The replay buffer is pickled separately (it can be very large).
                common.save_checkpoints(train_count, buffer, save_path_buffer, 'dreamer_v1_buffer')

    pass  # unreachable: the while-loop above never exits normally
