#!/usr/bin/env python3
'''
完成适配，参考链接：https://github.com/toshikwa/fqf-iqn-qrdqn.pytorch/tree/master

训练记录：
在笔记本上训练
20250109：训练分数-17.6分，测试分数0分，继续训练
20250110：学习率： 0.00025，训练分数-18.1分，测试分数0分，暂停训练
'''
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

import torch.optim as optim

from tensorboardX import SummaryWriter

from lib import dqn_model, common

import ale_py

gym.register_envs(ale_py)  # make ALE/Atari environment ids visible to gymnasium
# Number of steps for n-step returns (1 = plain one-step TD)
REWARD_STEPS = 1

# Prioritized replay hyperparameters
PRIO_REPLAY_ALPHA = 0.6  # priority exponent alpha
BETA_START = 0.4         # initial importance-sampling exponent beta
BETA_FRAMES = 100000     # frames over which beta is annealed to 1.0


def eval_quantile_at_action(quantiles, actions):
    """Pick, for each sample, the quantile column of the taken action.

    quantiles: (batch, N, num_actions) tensor of quantile values.
    actions: (batch,) or (batch, 1) tensor of action indices.
    Returns a (batch, N, 1) tensor of the selected actions' quantiles.
    """
    assert quantiles.size(0) == actions.size(0)

    # Normalize actions to shape (batch, 1) before building the gather index.
    if actions.dim() == 1:
        actions = actions.unsqueeze(-1)
    gather_index = actions.unsqueeze(1).expand(
        quantiles.size(0), quantiles.size(1), 1)
    return quantiles.gather(dim=2, index=gather_index)


def calculate_huber_loss(td_errors, kappa):
    """Element-wise Huber loss: quadratic within |e| <= kappa, linear outside."""
    abs_errors = td_errors.abs()
    quadratic = 0.5 * td_errors.pow(2)
    linear = kappa * (abs_errors - 0.5 * kappa)
    return torch.where(abs_errors <= kappa, quadratic, linear)



def calculate_quantile_huber_loss(td_errors, taus, weights=None, kappa=1.0):
    """Quantile Huber loss of QR-DQN.

    Parameters
    ----------
    td_errors : (batch, N, N') pairwise TD errors between current and
        target quantiles.
    taus : (1, N) quantile midpoints matching dim 1 of ``td_errors``.
    weights : optional per-sample importance-sampling weights, broadcastable
        against the per-sample (batch, 1) loss.
    kappa : Huber threshold.

    Returns a scalar loss tensor.
    """
    # Fix: the original unpacked batch_size/N/N_dash and never used them;
    # keep only the shape contract.
    assert td_errors.dim() == 3

    element_wise_huber_loss = calculate_huber_loss(td_errors, kappa)

    # |tau - 1{delta < 0}| weighting; the indicator is detached so gradients
    # flow only through the Huber term.
    element_wise_quantile_huber_loss = torch.abs(
        taus[..., None] - (td_errors.detach() < 0).float()) * element_wise_huber_loss / kappa

    # Sum over current quantiles (dim 1), average over target quantiles (dim 2)
    batch_quantile_huber_loss = element_wise_quantile_huber_loss.sum(dim=1).mean(dim=1, keepdim=True)

    if weights is not None:
        return (batch_quantile_huber_loss * weights).mean()
    return batch_quantile_huber_loss.mean()



def calc_loss(batch, batch_weights, net, tgt_net, gamma, weights=None, kappa=1.0, device="cpu"):
    """Compute the QR-DQN quantile Huber loss for one prioritized batch.

    Parameters
    ----------
    batch : transitions from the replay buffer (unpacked via common.unpack_batch).
    batch_weights : importance-sampling weights from the prioritized buffer.
    net / tgt_net : online and target networks.
    gamma : discount factor (already raised to REWARD_STEPS by the caller).
    weights : optional explicit IS weights; when None, ``batch_weights`` is used.
    kappa : Huber loss threshold.

    Returns
    -------
    (loss tensor, mean target Q value as float,
     per-sample mean |TD error| of shape (batch, 1) for priority updates).
    """
    states, actions, rewards, dones, next_states = common.unpack_batch(batch)

    states_v = torch.tensor(states).to(device)
    actions_v = torch.tensor(actions).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    dones_v = torch.ByteTensor(dones).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    # Bug fix: the prioritized-replay IS weights were accepted but never used
    # (see the previously commented-out conversion). Shape (batch, 1) so it
    # broadcasts against the per-sample loss, not across the batch.
    if weights is None and batch_weights is not None:
        weights = torch.tensor(batch_weights, dtype=torch.float32, device=device).view(-1, 1)

    # Quantiles of the taken actions: (batch, N, 1)
    current_sa_quantiles = eval_quantile_at_action(net(states_v), actions_v)
    N = current_sa_quantiles.size(1)
    # Quantile midpoints tau_hat_i = (i + 0.5) / N, shape (1, N) — computed
    # locally instead of relying on a module-level global.
    taus = torch.arange(0, N + 1, device=device, dtype=torch.float32) / N
    tau_hats = ((taus[1:] + taus[:-1]) / 2.0).view(1, N)

    with torch.no_grad():
        # Bug fix: greedy next actions must be selected from the NEXT states
        # (previously states_v was passed here by mistake).
        next_q = tgt_net.calculate_q(next_states_v)
        next_actions = torch.argmax(next_q, dim=1, keepdim=True)
        # Transposed to (batch, 1, N) so it broadcasts against (batch, N, 1)
        next_sa_quantiles = eval_quantile_at_action(tgt_net(next_states_v), next_actions).transpose(1, 2)
        target_sa_quantiles = rewards_v[..., None, None] + (1.0 - dones_v[..., None, None]) * gamma * next_sa_quantiles

    # Pairwise TD errors: (batch, N, N)
    td_errors = target_sa_quantiles - current_sa_quantiles
    quantile_huber_loss = calculate_quantile_huber_loss(td_errors, tau_hats, weights, kappa)

    # Bug fix: ``.item`` was missing its call parentheses and returned the
    # bound method object instead of a float.
    return quantile_huber_loss, next_q.detach().mean().item(), \
        td_errors.detach().abs().sum(dim=1).mean(dim=1, keepdim=True)


@torch.no_grad()
def test_model(env, net, device, episodes=5):
    """Run greedy evaluation episodes and return the mean total reward.

    An episode is abandoned early once the agent emits more than 30
    consecutive repeated Noop (action 0) steps, to avoid stuck agents.
    """
    reward_sum = 0.0
    for _ in range(episodes):
        obs, _ = env.reset()
        last_action = -1
        consecutive_noops = 0
        while True:
            obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
            q_values = net.calculate_q(obs_v).data.cpu().numpy()
            action = np.argmax(q_values)
            if action == 0 and action == last_action:  # repeated Noop
                consecutive_noops += 1
                if consecutive_noops > 30:
                    break
            else:
                consecutive_noops = 0
            last_action = action
            obs, reward, done, trunc, _ = env.step(action)
            reward_sum += reward
            if done or trunc:
                break
    return reward_sum / episodes

import os

def select_device(args):
    """Return the compute device: CUDA, then MPS, when --cuda is set; else CPU."""
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


if __name__ == "__main__":
    params = common.HYPERPARAMS['doubledunk']
    # Stretch the epsilon decay over twice as many frames for this game
    params['epsilon_frames'] *= 2
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=True, action="store_true", help="Enable cuda")
    args = parser.parse_args()
    device = select_device(args)

    env = common.make_env(params['env_name'])
    test_env = common.make_env(params['env_name'])  # separate env for evaluation
    save_path = os.path.join("saves", "qrdqn-doubledunk")
    os.makedirs(save_path, exist_ok=True)

    # Number of quantiles N in the QR-DQN value distribution
    QRDQN_N = 200

    writer = SummaryWriter(comment="-" + params['run_name'] + "-qrdqn")
    net = dqn_model.QRDQN(env.observation_space.shape, env.action_space.n, N_ATOMS=QRDQN_N).to(device)
    tgt_net = ptan.agent.TargetNet(net)
    print(net)
    selector = ptan.actions.EpsilonGreedyActionSelector()
    epsilon_tracker = common.EpsilonTracker(selector, params)
    # The agent acts greedily on expected Q-values (mean over quantiles)
    agent = ptan.agent.DQNAgent(lambda x: net.calculate_q(x), action_selector=selector, device=device)


    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'], steps_count=REWARD_STEPS)
    buffer = ptan.experience.PrioritizedReplayBuffer(exp_source, params['replay_size'], PRIO_REPLAY_ALPHA)
    optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'], eps=1e-2/params['batch_size'])
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20000, gamma=0.9)

    # Quantile midpoints tau_hat_i = (i + 0.5) / N, shape (1, N)
    taus = torch.arange(0, QRDQN_N + 1, device=device, dtype=torch.float32) / float(QRDQN_N)
    tau_hats = ((taus[1:] + taus[:-1]) / 2.0).view(1, QRDQN_N)

    frame_idx = 0
    train_count = 0
    # Resume training from the most recent checkpoint, if any
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Pick the checkpoint with the highest frame index.
        # NOTE(review): assumes filenames look like "qrdqn_epoch_<frame>.<ext>"
        # — verify against common.save_checkpoints.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        net.load_state_dict(checkpoint['net'])
        tgt_net.target_model.load_state_dict(checkpoint['tgt_net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        frame_idx = checkpoint['frame_idx']
        train_count = checkpoint['train_count']
        scheduler.load_state_dict(checkpoint['scheduler'])
        selector.epsilon = checkpoint['epsilon']
        print("加载模型成功")  # "model loaded successfully"
        # Print the restored learning rate(s)
        for param_group in optimizer.param_groups:
            print("学习率：", param_group['lr'])  # "learning rate:"

    beta = BETA_START

    with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        while True:
            frame_idx += 1
            buffer.populate(1)
            epsilon_tracker.frame(frame_idx)
            # Anneal the importance-sampling exponent beta towards 1.0
            beta = min(1.0, BETA_START + frame_idx * (1.0 - BETA_START) / BETA_FRAMES)

            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                # reward() returns True once the stop_reward target is reached
                if reward_tracker.reward(new_rewards[0], frame_idx):
                    break

            # Wait until the replay buffer is warm before training
            if len(buffer) < params['replay_initial']:
                continue

            optimizer.zero_grad()
            batch, batch_indices, batch_weights = buffer.sample(params['batch_size'], beta)
            quantile_loss, mean_q, errors = calc_loss(batch, batch_weights, net, tgt_net.target_model,
                                               params['gamma'] ** REWARD_STEPS, device=device)
            quantile_loss.backward()
            # If gradients explode, clip_grad_norm can be applied here
            optimizer.step()
            # Refresh replay priorities with the fresh absolute TD errors
            buffer.update_priorities(batch_indices, errors.data.cpu().numpy())
            # scheduler.step()
            train_count += 1

            # Periodically sync the target net and persist a full checkpoint
            if train_count % params['target_net_sync'] == 0:
                tgt_net.sync()
                checkpoint = {
                    "net": net.state_dict(),
                    "tgt_net": tgt_net.target_model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "frame_idx": frame_idx,
                    "scheduler": scheduler.state_dict(),
                    "train_count": train_count,
                    "epsilon": selector.epsilon
                }
                common.save_checkpoints(frame_idx, checkpoint, save_path, "qrdqn", keep_last=5)
                print(f"Saved checkpoint to {save_path}")

            # Periodic greedy evaluation on the separate test environment
            if train_count % 2000 == 0:
                net.eval()
                test_rewards = test_model(test_env, net, device)
                net.train()

                common.save_best_model(test_rewards, net.state_dict(), save_path, "qrdqn-best", keep_best=10)
                print(f"save best model, current test score: {test_rewards}")
