#!/usr/bin/env python3
'''
完成适配，参考链接：https://github.com/toshikwa/fqf-iqn-qrdqn.pytorch/tree/master

训练记录：
在2号机上训练
20250113:训练分数0.1，测试分数550.53，继续训练
20250114:学习率： 5e-05，训练分数0.1分，测试分数550.53分，暂停训练，查询训练分数为啥是0.1分，play模型
'''
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

import torch.optim as optim

from tensorboardX import SummaryWriter

from lib import dqn_model, common

import ale_py

# Register ALE (Atari) environments with gymnasium so make_env can find them.
gym.register_envs(ale_py)
# n-step return length used by the experience source (1 = plain one-step TD)
REWARD_STEPS = 1

# prioritized replay settings
PRIO_REPLAY_ALPHA = 0.6  # priority exponent alpha
BETA_START = 0.4         # initial importance-sampling exponent beta
BETA_FRAMES = 100000     # frames over which beta anneals linearly to 1.0

QRDQN_N = 32         # number of quantile fractions N proposed per state
NUM_COSINES = 64     # cosine embedding dimension for the fraction conditioning
DUELING_NET = False  # whether the model uses a dueling head
NOISY_NET = False    # whether the model uses noisy linear layers


def eval_quantile_at_action(quantiles, actions):
    """Pick, for each batch element, the quantile values of its chosen action.

    Args:
        quantiles: tensor of shape (batch, N, num_actions).
        actions: action indices, shape (batch,) or (batch, 1).

    Returns:
        Tensor of shape (batch, N, 1) with the selected action's quantiles.
    """
    assert quantiles.size(0) == actions.size(0)

    batch_size = quantiles.size(0)
    n_taus = quantiles.size(1)

    # Broadcast the per-sample action index across the quantile dimension
    # so it can drive a gather along the action axis.
    if actions.dim() == 1:
        gather_index = actions[:, None, None].expand(batch_size, n_taus, 1)
    else:
        gather_index = actions[:, :, None].expand(batch_size, n_taus, 1)

    return quantiles.gather(dim=2, index=gather_index)


def calculate_huber_loss(td_errors, kappa):
    """Element-wise Huber loss: quadratic for |x| <= kappa, linear beyond."""
    abs_errors = td_errors.abs()
    quadratic = 0.5 * td_errors.pow(2)
    linear = kappa * (abs_errors - 0.5 * kappa)
    return torch.where(abs_errors <= kappa, quadratic, linear)



def calculate_quantile_huber_loss(td_errors, taus, weights=None, kappa=1.0):
    """Quantile Huber loss for distributional TD learning.

    Args:
        td_errors: TD errors of shape (batch, N, N'), where N indexes the
            current quantiles and N' the target quantiles.
        taus: fractions of shape (batch, N) matching dim 1 of td_errors.
        weights: optional (batch, 1) importance-sampling weights from the
            prioritized replay buffer.
        kappa: Huber threshold; the loss is divided by kappa so its scale is
            comparable across kappa values.

    Returns:
        Scalar loss tensor.
    """
    # The original code unpacked td_errors.shape into unused locals; keep the
    # implicit rank check but make it explicit instead.
    assert td_errors.dim() == 3

    element_wise_huber_loss = calculate_huber_loss(td_errors, kappa)

    # |tau - 1{td < 0}| weights over- vs. under-estimation asymmetrically;
    # the indicator is computed on detached errors so it carries no gradient.
    element_wise_quantile_huber_loss = torch.abs(
        taus[..., None] - (td_errors.detach() < 0).float()) * element_wise_huber_loss / kappa

    # Sum over current quantiles (dim 1), average over target quantiles (dim 2
    # after the sum collapses dim 1), keeping a per-sample column vector.
    batch_quantile_huber_loss = element_wise_quantile_huber_loss.sum(dim=1).mean(dim=1, keepdim=True)

    if weights is not None:
        return (batch_quantile_huber_loss * weights).mean()
    return batch_quantile_huber_loss.mean()


def calculate_fraction_loss(net, N, state_embeddings, sa_quantile_hats, taus,
                                actions, weights):
        """Loss for the fraction-proposal network.

        Shapes below are assumed from usage -- TODO confirm against dqn_model:
            net: online FQF network providing calculate_quantiles().
            N: number of proposed fractions (QRDQN_N).
            state_embeddings: (batch, embed_dim), detached by the caller.
            sa_quantile_hats: (batch, N, 1) quantiles at the fraction midpoints
                for the taken actions, detached by the caller.
            taus: presumably (batch, N+1) fractions including the fixed 0/1
                endpoints (only the interior taus[:, 1:-1] are used).
            actions: taken actions for the batch.
            weights: optional (batch, 1) prioritized-replay weights.

        Returns a scalar loss whose gradient w.r.t. the proposed taus drives
        the fraction net (gradients of net parameters follow by chain rule).
        """
        batch_size = state_embeddings.shape[0]
        device = state_embeddings.device  # unused; kept as-is from the reference code

        # Quantile values F^{-1}(tau_i) at the interior fractions only; the
        # endpoints tau_0=0 and tau_N=1 are fixed and receive no gradient.
        # no_grad: only the taus themselves should carry gradient here.
        with torch.no_grad():
            sa_quantiles = eval_quantile_at_action(
                net.calculate_quantiles(
                    taus=taus[:, 1:-1], state_embeddings=state_embeddings),
                actions)

        # NOTE: Proposition 1 in the paper requires F^{-1} is non-decreasing.
        # I relax this requirements and calculate gradients of taus even when
        # F^{-1} is not non-decreasing.

        # values_1/signs_1 compare each tau's quantile with the midpoint
        # quantile below it; values_2/signs_2 with the midpoint above it.
        values_1 = sa_quantiles - sa_quantile_hats[:, :-1]
        signs_1 = sa_quantiles > torch.cat([
            sa_quantile_hats[:, :1], sa_quantiles[:, :-1]], dim=1)

        values_2 = sa_quantiles - sa_quantile_hats[:, 1:]
        signs_2 = sa_quantiles < torch.cat([
            sa_quantiles[:, 1:], sa_quantile_hats[:, -1:]], dim=1)

        # Signed combination approximating dW1/dtau_i for each interior tau.
        gradient_of_taus = (
            torch.where(signs_1, values_1, -values_1)
            + torch.where(signs_2, values_2, -values_2)
        ).view(batch_size, N-1)

        # Gradients of the network parameters and corresponding loss
        # are calculated using chain rule.
        if weights is not None:
            fraction_loss = ((
                (gradient_of_taus * taus[:, 1:-1]).sum(dim=1, keepdim=True)
            ) * weights).mean()
        else:
            fraction_loss = \
                (gradient_of_taus * taus[:, 1:-1]).sum(dim=1).mean()

        return fraction_loss


def calculate_quantile_loss(net, tgt_net, state_embeddings, tau_hats,
                                current_sa_quantile_hats, actions, rewards,
                                next_states, dones, weights, gamma, kappa):
        """Quantile Huber loss between online quantiles and the Bellman target.

        Shapes assumed from usage -- TODO confirm against dqn_model:
            net: online network; only its fraction_net is used here.
            tgt_net: target network for next-state embeddings/quantiles.
            state_embeddings: online embeddings (NOTE: not used in this body;
                kept for interface parity with the reference implementation).
            tau_hats: (batch, N) fraction midpoints; must not require grad.
            current_sa_quantile_hats: (batch, N, 1) online quantiles at the
                taken actions.
            actions, rewards, dones: per-sample batch tensors.
            next_states: next observations.
            weights: optional (batch, 1) prioritized-replay weights.
            gamma: discount (caller already raises it to the n-step power).
            kappa: Huber threshold.

        Returns:
            (loss, mean next-state Q value as a float, per-sample summed
            |TD errors| of shape (batch, 1) used as new replay priorities).
        """
        assert not tau_hats.requires_grad

        with torch.no_grad():
            # NOTE: Current and target quantiles share the same proposed
            # fractions to reduce computations. (i.e. next_tau_hats = tau_hats)

            # Calculate Q values of next states.
            # (triple-d 'embeddding' presumably matches the method name in
            # dqn_model -- verify before renaming anywhere.)
            next_state_embeddings =\
                tgt_net.calculate_state_embeddding(next_states)
            next_q = tgt_net.calculate_q(
                state_embeddings=next_state_embeddings,
                fraction_net=net.fraction_net)

            # Calculate greedy actions.
            next_actions = torch.argmax(next_q, dim=1, keepdim=True)

            # Calculate quantile values of next states and actions at tau_hats.
            # transpose -> (batch, 1, N) so broadcasting against the current
            # (batch, N, 1) quantiles yields a (batch, N, N) TD-error matrix.
            next_sa_quantile_hats = eval_quantile_at_action(
                tgt_net.calculate_quantiles(
                    taus=tau_hats, state_embeddings=next_state_embeddings),
                next_actions).transpose(1, 2)

            # Calculate target quantile values.
            target_sa_quantile_hats = rewards[..., None] + (
                1.0 - dones[..., None]) * gamma * next_sa_quantile_hats

        td_errors = target_sa_quantile_hats - current_sa_quantile_hats

        quantile_huber_loss = calculate_quantile_huber_loss(
            td_errors, tau_hats, weights, kappa)

        return quantile_huber_loss, next_q.detach().mean().item(), \
            td_errors.detach().abs().sum(dim=1).mean(dim=1, keepdim=True)



def calc_loss(batch, batch_weights, net, tgt_net, gamma, kappa=1.0, device="cpu", ent_coef=0.00):
    """Compute the three FQF losses for one prioritized-replay batch.

    Args:
        batch: experience transitions, unpacked by common.unpack_batch.
        batch_weights: importance-sampling weights from the replay buffer.
        net: online FQF network.
        tgt_net: target network (a plain model, not a ptan TargetNet wrapper).
        gamma: discount already raised to the n-step power by the caller.
        kappa: Huber threshold for the quantile loss.
        device: torch device string or object.
        ent_coef: entropy bonus coefficient (0 disables the entropy loss).

    Returns:
        (fraction_loss, quantile_loss, entropy_loss, errors) where errors are
        per-sample |TD error| sums used to refresh replay priorities.
    """
    states, actions, rewards, dones, next_states = common.unpack_batch(batch)

    states_v = torch.tensor(states).to(device)
    actions_v = torch.tensor(actions).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    # torch.ByteTensor is a legacy constructor; build the same uint8 tensor
    # with the modern factory function instead (identical values/dtype).
    dones_v = torch.tensor(dones, dtype=torch.uint8).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    batch_weights_v = torch.tensor(batch_weights).to(device)

    # The fraction net is trained on detached embeddings so its loss does not
    # back-propagate into the convolutional trunk.
    state_embeddings = net.calculate_state_embeddding(states_v)
    taus, tau_hats, entropies = net.calculate_fractions(state_embeddings=state_embeddings.detach())

    current_sa_quantile_hats = eval_quantile_at_action(net.calculate_quantiles(tau_hats, state_embeddings=state_embeddings), actions_v)

    fraction_loss = calculate_fraction_loss(net, QRDQN_N, state_embeddings.detach(), current_sa_quantile_hats.detach(), taus, actions_v, batch_weights_v)

    quantile_loss, mean_q, errors = calculate_quantile_loss(net, tgt_net, state_embeddings, tau_hats, current_sa_quantile_hats, actions_v, rewards_v, next_states_v, dones_v, batch_weights_v, gamma, kappa)

    # Negative sign: maximizing fraction entropy encourages diverse proposals.
    entropy_loss = -ent_coef * entropies.mean()

    return fraction_loss, quantile_loss, entropy_loss, errors


@torch.no_grad()
def test_model(env, net, device, episodes=5):
    """Play `episodes` greedy episodes and return the mean total reward.

    An episode stuck emitting Noop (action 0) more than 30 times in a row is
    cut short so evaluation cannot hang on a frozen policy.
    """
    reward_sum = 0.0
    for _ in range(episodes):
        consecutive_noops = 0
        last_action = -1
        obs, _ = env.reset()
        episode_over = False
        while not episode_over:
            obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
            q_values = net.calculate_q(states=obs_v).data.cpu().numpy()
            action = np.argmax(q_values)
            # Track consecutive Noops; bail out of a stuck episode.
            if action == 0 and last_action == action:
                consecutive_noops += 1
                if consecutive_noops > 30:
                    break
            else:
                consecutive_noops = 0
            last_action = action
            obs, reward, terminated, truncated, _ = env.step(action)
            reward_sum += reward
            episode_over = terminated or truncated
    return reward_sum / episodes

import os

def select_device(args):
    """Choose the compute device.

    With --cuda requested: CUDA if available, otherwise Apple MPS if
    available. Without the flag (or with no accelerator): CPU.
    """
    if args.cuda:
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
    return torch.device("cpu")


if __name__ == "__main__":
    # Hyper-parameters for the Entombed Atari game; epsilon decay is
    # stretched over twice the default number of frames.
    params = common.HYPERPARAMS['entombed']
    params['epsilon_frames'] *= 2
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True together with action="store_true" means the
    # flag is always on and cannot be disabled from the CLI -- confirm intended.
    parser.add_argument("--cuda", default=True, action="store_true", help="Enable cuda")
    args = parser.parse_args()
    device = select_device(args)

    env = common.make_env(params['env_name'])
    test_env = common.make_env(params['env_name'])  # separate env for evaluation
    save_path = os.path.join("saves", "fqf-entombed")
    os.makedirs(save_path, exist_ok=True)
    logger = common.setup_logger(save_path)

    writer = SummaryWriter(comment="-" + params['run_name'] + "-qrdqn")
    net = dqn_model.FQFDQN(env.observation_space.shape, env.action_space.n, N_ATOMS=QRDQN_N, num_cosines=NUM_COSINES, dueling_net=DUELING_NET, noisy_net=NOISY_NET).to(device)
    tgt_net = ptan.agent.TargetNet(net)
    print(net)
    selector = ptan.actions.EpsilonGreedyActionSelector()
    epsilon_tracker = common.EpsilonTracker(selector, params)
    agent = ptan.agent.DQNAgent(lambda x: net.calculate_q(states=x), action_selector=selector, device=device)


    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'], steps_count=REWARD_STEPS)
    buffer = ptan.experience.PrioritizedReplayBuffer(exp_source, params['replay_size'], PRIO_REPLAY_ALPHA)
    # Two optimizers as in the FQF reference implementation.
    # NOTE(review): frac_optimizer is given ALL network parameters here; the
    # reference trains only the fraction-proposal net with RMSprop -- confirm
    # this is intended (it may explain unexpected training scores).
    frac_optimizer = optim.RMSprop(net.parameters(), lr=params['frac_learning_rate'], alpha=0.95, eps=0.00001)
    frac_scheduler = optim.lr_scheduler.StepLR(frac_optimizer, step_size=20000, gamma=0.9)
    # Adam trains the conv trunk plus the cosine and quantile heads only.
    quan_optimizer = optim.Adam(list(net.conv.parameters()) + list(net.cosine_net.parameters()) + list(net.quantile_net.parameters()), lr=params['learning_rate'], eps=1e-2/params['batch_size'])
    quan_scheduler = optim.lr_scheduler.StepLR(quan_optimizer, step_size=20000, gamma=0.9)

    frame_idx = 0
    train_count = 0
    # Resume training from the latest checkpoint, if any exist.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Checkpoint files are assumed named like <prefix>_epoch_<frame>.pth so
        # index 2 of the '_'-split is the frame number -- TODO confirm against
        # common.save_checkpoints.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            net.load_state_dict(checkpoint['net'])
            tgt_net.target_model.load_state_dict(checkpoint['tgt_net'])
            frac_optimizer.load_state_dict(checkpoint['frac_optimizer'])
            quan_optimizer.load_state_dict(checkpoint['quan_optimizer'])
            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']
            frac_scheduler.load_state_dict(checkpoint['frac_scheduler'])
            quan_scheduler.load_state_dict(checkpoint['quan_scheduler'])
            selector.epsilon = checkpoint['epsilon']
            print("加载模型成功")
            # Log the resumed learning rate(s).
            for param_group in quan_optimizer.param_groups:
                print("学习率：", param_group['lr'])

    beta = BETA_START

    with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        while True:
            frame_idx += 1
            buffer.populate(1)
            epsilon_tracker.frame(frame_idx)
            # Anneal importance-sampling beta linearly from BETA_START to 1.0.
            beta = min(1.0, BETA_START + frame_idx * (1.0 - BETA_START) / BETA_FRAMES)

            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                if reward_tracker.reward(new_rewards[0], frame_idx):
                    break

            # Wait until the replay buffer is warm before training.
            if len(buffer) < params['replay_initial']:
                continue

            batch, batch_indices, batch_weights = buffer.sample(params['batch_size'], beta)
            frac_optimizer.zero_grad()
            quan_optimizer.zero_grad()
            frac_loss, quan_loss, entropy_loss, errors = calc_loss(batch, batch_weights, net, tgt_net.target_model,
                                               params['gamma'] ** REWARD_STEPS, device=device)
            # retain_graph keeps the shared embedding graph alive for the
            # subsequent quantile-loss backward pass.
            frac_loss.backward(retain_graph=True)
            frac_optimizer.step()
            quan_loss.backward()
            quan_optimizer.step()
            # If gradients explode, clip_grad_norm could be applied here.
            buffer.update_priorities(batch_indices, errors.data.cpu().numpy())
            # scheduler.step()
            train_count += 1

            # Periodically sync the target net and write a resumable checkpoint.
            if train_count % params['target_net_sync'] == 0:
                tgt_net.sync()
                checkpoint = {
                    "net": net.state_dict(),
                    "tgt_net": tgt_net.target_model.state_dict(),
                    "frac_optimizer": frac_optimizer.state_dict(),
                    "quan_optimizer": quan_optimizer.state_dict(),
                    "frame_idx": frame_idx,
                    "frac_scheduler": frac_scheduler.state_dict(),
                    "quan_scheduler": quan_scheduler.state_dict(),
                    "train_count": train_count,
                    "epsilon": selector.epsilon
                }
                common.save_checkpoints(frame_idx, checkpoint, save_path, "qrdqn", keep_last=5)
                print(f"Saved checkpoint to {save_path}")

            # Periodic greedy evaluation; best-scoring weights kept separately.
            if train_count % 2000 == 0:
                net.eval()
                test_rewards = test_model(test_env, net, device)
                net.train()

                common.save_best_model(test_rewards, net.state_dict(), save_path, "qrdqn-best", keep_best=10)
                print(f"save best model, current test score: {test_rewards}")
