#!/usr/bin/env python3
"""
未适配,缺少教师模型，参考链接：https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/qdagger_dqn_atari_impalacnn.py#L27
"""
import collections

import gymnasium as gym
import ptan
import numpy as np
import argparse
import os
import time

import torch
import torch.optim as optim
import torch.nn.functional as F

from tensorboardX import SummaryWriter
from typing import Any
from lib import dqn_model, common
import ale_py

gym.register_envs(ale_py)

BETA_START = 0.1
BETA_FRAMES = 1e8

TEACHER_STEPS = 500000

class FireResetEnv(gym.Wrapper):
    """For environments where the user needs to press FIRE for the game to start.

    After a reset, actions 1 (expected to be FIRE) and 2 are issued so the game
    actually begins; if either step accidentally ends the episode, the
    environment is reset again.
    """

    def __init__(self, env=None):
        super(FireResetEnv, self).__init__(env)
        # Games with a FIRE action expose it at index 1 and have at least
        # three actions in total.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # We do not know in advance which of the first actions starts play,
        # so try them one by one after resetting; if the game ends, reset again.
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            # Bug fix: return the observation/info from the fresh reset rather
            # than the stale pre-reset observation captured above.
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info

class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper.

    Non-zero rewards are floor-divided by 10 (with a minimum of 1 when the
    scaled value reaches 0), and a fixed penalty is added whenever the agent
    loses a life (based on the 'lives' entry of the step info dict).
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so later losses can be detected.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # Scale non-zero rewards down; keep at least 1 so small rewards
        # are not flattened to zero.
        if reward != 0:
            scaled = reward // 10
            reward = scaled if scaled != 0 else 1

        # Apply the penalty when a life was lost since the last step.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info
    


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the standard Atari preprocessing stack around *env*.

    episodic_life: treat every life as its own episode.
    reward_clipping: accepted for API compatibility; reward shaping is always
    applied by RewardPenaltyWrapper regardless of this flag.
    """
    if episodic_life:
        # Present a multi-life game as a sequence of single-life episodes.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomize the start state with up to 30 no-op actions.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = common.ProcessFrame84(env)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = ptan.common.wrappers.FrameStack(env, stack_frames)
    return RewardPenaltyWrapper(env)


class PrioReplayBuffer:
    """Prioritized experience replay buffer (ring buffer + per-slot priority).

    prob_alpha: how strongly sampling favors high-priority entries
        (0 means uniform sampling).
    priorities: per-slot priority array, zero-initialized.
    """

    def __init__(self, exp_source, buf_size, prob_alpha=0.6):
        self.exp_source_iter = iter(exp_source)
        self.prob_alpha = prob_alpha
        self.capacity = buf_size
        self.pos = 0
        self.buffer = []
        self.priorities = np.zeros((buf_size, ), dtype=np.float32)

    def __len__(self):
        return len(self.buffer)

    def populate(self, count):
        """Pull *count* new experiences from the source into the ring buffer.

        Newly added samples receive the current maximum priority (1.0 while
        the buffer is empty) so every fresh sample is guaranteed a chance to
        be drawn for training at least once.
        """
        max_prio = self.priorities.max() if self.buffer else 1.0
        for _ in range(count):
            entry = next(self.exp_source_iter)
            if len(self.buffer) < self.capacity:
                self.buffer.append(entry)
            else:
                self.buffer[self.pos] = entry
            self.priorities[self.pos] = max_prio
            self.pos = (self.pos + 1) % self.capacity

    def sample(self, batch_size, beta=0.4):
        """Draw a batch proportionally to priority**alpha.

        *beta* sets the strength of the importance-sampling correction for
        the bias introduced by prioritized sampling: a small beta leaves
        updates dominated by high-priority samples, while beta -> 1 fully
        compensates, weighting all samples evenly again. It is typically
        annealed from a small value toward 1 over the course of training.

        Returns (samples, indices, IS weights normalized so max weight is 1).
        """
        # Only the filled portion of the priority array is meaningful.
        if len(self.buffer) == self.capacity:
            prios = self.priorities
        else:
            prios = self.priorities[:self.pos]
        # Priority -> sampling probability (priority ** alpha, normalized).
        probs = prios ** self.prob_alpha
        probs /= probs.sum()
        indices = np.random.choice(len(self.buffer), batch_size, p=probs)
        samples = [self.buffer[idx] for idx in indices]
        # Importance-sampling weights: (N * P(i)) ** (-beta), scaled so the
        # largest weight equals 1.
        total = len(self.buffer)
        weights = (total * probs[indices]) ** (-beta)
        weights /= weights.max()
        return samples, indices, np.array(weights, dtype=np.float32)

    def update_priorities(self, batch_indices, batch_priorities):
        """Assign new priorities (typically per-sample TD errors) to the
        slots that were drawn in the last sample() call.

        batch_indices: indices returned by sample().
        batch_priorities: new priority value for each of those indices.
        """
        for idx, prio in zip(batch_indices, batch_priorities):
            self.priorities[idx] = prio


def calc_loss(batch, batch_weights, net, tgt_net, gamma, device="cpu"):
    """Importance-weighted DQN loss for prioritized replay.

    batch: sampled transitions.
    batch_weights: per-sample importance-sampling weights from the buffer.

    Returns a pair: (mean weighted squared TD error, per-sample losses + 1e-5).
    The second element is fed back to the buffer as the new priorities.
    """
    states, actions, rewards, dones, next_states = common.unpack_batch(batch)

    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    done_mask = torch.ByteTensor(dones).to(device)
    weights_v = torch.tensor(batch_weights).to(device)

    # Q(s, a) for the actions that were actually taken.
    q_taken = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    # Bootstrapped value from the target network; terminal states contribute 0.
    next_q = tgt_net(next_states_v).max(1)[0]
    next_q[done_mask.bool()] = 0.0
    target_q = next_q.detach() * gamma + rewards_v

    # IS weights scale each sample's squared TD error: over-sampled
    # high-priority transitions are damped so the gradient stays unbiased,
    # while fresh high-error samples still update the network quickly.
    per_sample_losses = weights_v * (q_taken - target_q) ** 2
    # The 1e-5 term keeps every priority strictly positive, so no sample's
    # probability of being drawn again can collapse to zero.
    return per_sample_losses.mean(), per_sample_losses + 1e-5


def test_model(env, net, device, episodes=5):
    """Run greedy evaluation episodes and return the mean total reward.

    An episode is aborted early when the policy emits more than 30
    consecutive no-op (action 0) steps, to avoid hanging on a stuck agent.
    """
    with torch.no_grad():
        total_reward = 0.0
        for _ in range(episodes):
            consecutive_noops = 0
            last_action = -1
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
                logits_v = net(obs_v)
                probs = F.softmax(logits_v, dim=1).data.cpu().numpy()
                action = np.argmax(probs)
                if action == 0 and last_action == action:  # Noop
                    consecutive_noops += 1
                    if consecutive_noops > 30:
                        break
                else:
                    consecutive_noops = 0
                last_action = action
                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                if done or trunc:
                    break
        return total_reward / episodes


def evaluate(env, net, device, episodes=10):
    """Greedily evaluate *net* and return a list of per-episode returns.

    Bug fix: the previous version appended every per-step reward, so the
    returned list held step rewards rather than one total return per
    episode. Its mean was then compared against genuine episodic returns
    (the student's reward deque), making the distillation coefficient
    computation inconsistent. Now one summed return is recorded per episode.
    """
    with torch.no_grad():
        episodic_returns = []
        for _ in range(episodes):
            obs, _ = env.reset()
            episode_return = 0.0
            while True:
                obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
                logits_v = net(obs_v)
                probs = F.softmax(logits_v, dim=1).data.cpu().numpy()
                action = np.argmax(probs)
                obs, reward, done, trunc, _ = env.step(action)
                episode_return += reward
                if done or trunc:
                    break
            episodic_returns.append(episode_return)
        return episodic_returns


def evaluate_rainbow(env, net, device, episodes=10):
    """Greedily evaluate a Rainbow network (via its qvals() head) and return
    a list of per-episode returns.

    Bug fix: the previous version appended every per-step reward, so the
    returned list held step rewards rather than episodic returns, which made
    `np.mean(teacher_episodic_returns)` incomparable with the student's
    episodic-return statistics used for the distillation coefficient.
    """
    with torch.no_grad():
        episodic_returns = []
        for _ in range(episodes):
            obs, _ = env.reset()
            episode_return = 0.0
            while True:
                obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
                logits_v = net.qvals(obs_v)
                probs = F.softmax(logits_v, dim=1).data.cpu().numpy()
                action = np.argmax(probs)
                obs, reward, done, trunc, _ = env.step(action)
                episode_return += reward
                if done or trunc:
                    break
            episodic_returns.append(episode_return)
        return episodic_returns


if __name__ == "__main__":
    params = {
        'env_name':         "ALE/Carnival-v5",
        'stop_reward':      50000.0,
        'run_name':         'dqn_qdragger_',
        'replay_size':      50 ** 6,
        'replay_initial':   20000,
        'target_net_sync':  10000,
        'epsilon_frames':   1e8,
        'epsilon_start':    1.0,
        'epsilon_final':    0.1,
        'learning_rate':    0.00025,
        'gamma':            0.99,
        'batch_size':       64,
        'temperature':      1.0,
    }
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=True, action="store_true", help="Enable cuda")
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
    save_path = os.path.join("saves", params['run_name'])

    env = wrap_dqn(gym.make(params['env_name'], obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    test_env = wrap_dqn(gym.make(params['env_name'], obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)

    writer = SummaryWriter(comment="-" + params['run_name'] + "-prio-replay")
    net = dqn_model.DQNDragger(env.observation_space.shape, env.action_space.n).to(device)
    tgt_net = ptan.agent.TargetNet(net)

    optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])

    frame_idx = 0
    beta = params['epsilon_start']
    best_loss = float('inf')
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # 增加加载模型的代码
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        frame_idx = checkpoint['frame_idx']
        net.load_state_dict(checkpoint['net'])
        tgt_net.target_model.load_state_dict(checkpoint['tgt_net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        beta = checkpoint['beta']
        best_loss = checkpoint['best_loss']
        # scheduler.load_state_dict(checkpoint['scheduler'])
        print("加载模型成功")
    # 这里不同，没有eval_states， 而是BETA，样本权重参数

    teacher_model_path = ""
    if not os.path.exists(teacher_model_path):
        raise "not found teacher model"

    teacher_model = dqn_model.RainbowDQN(env.observation_space.shape, env.action_space.n).to(device)
    teacher_model.load_state_dict(torch.load(teacher_model_path, map_location=device, weights_only=False))
    teacher_model.eval()

    teacher_episodic_returns = evaluate_rainbow(test_env, teacher_model, device, episodes=10)

    teacher_selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params['epsilon_start'])
    teacher_epsilon_tracker = common.EpsilonTracker(teacher_selector, params['epsilon_start'], params['epsilon_final'], params['epsilon_frames'])
    teacher_agent = ptan.agent.DQNAgent(net, teacher_selector, device=device)
    teacher_exp_source = ptan.experience.ExperienceSourceFirstLast(env, teacher_agent, gamma=params['gamma'], steps_count=1)
    # 这里不同，采用的是具备优先级的经验重放缓冲区
    teacher_buffer = ptan.experience.ExperienceReplayBuffer(teacher_exp_source, params['replay_size'])
    logger = common.setup_logger(save_path)

    with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        for idx in range(TEACHER_STEPS):
            teacher_buffer.populate(1)
            teacher_epsilon_tracker.frame(idx)
            # 根据书中描述，BETA的值需要慢慢随着训练增加到1比较有利于收敛
            new_rewards = teacher_exp_source.pop_total_rewards()
            if new_rewards:
                print("teacher model reward:", new_rewards[0])

        # 采集教师模型完成，开始离线训练学生模型
        for idx in range(TEACHER_STEPS):
            frame_idx += 1
            batch = teacher_buffer.sample(params['batch_size'])
            states, actions, rewards, dones, _ = common.unpack_batch(batch)
            states_v = torch.tensor(states).to(device)
            actions_v = torch.tensor(actions).to(device)
            done_mask = torch.ByteTensor(dones).to(device)
            with torch.no_grad():
                td_target  = common.calc_loss_dqn(batch, tgt_net.target_model, params['gamma'], device=device) / params['temperature']
                teacher_q_values = teacher_model.qvals(states_v) / params['temperature']


            student_q_values = net(states_v)
            old_val = student_q_values.gather(1, actions_v.unsqueeze(1)).squeeze()
            q_loss = F.mse_loss(td_target, old_val)

            student_q_values = student_q_values / params['temperature']
            distill_loss = torch.mean(common.kl_divergence_with_logits(student_q_values, teacher_q_values))

            loss = q_loss + 1.0 * distill_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if idx % params['target_net_sync'] == 0:
                tgt_net.sync()


            if idx % 100 == 0:
                writer.add_scalar("offline/loss", loss.item(), frame_idx)
                writer.add_scalar("offline/q_loss", q_loss.item(), frame_idx)
                writer.add_scalar("offline/distill_loss", distill_loss.item(), frame_idx)
                print("offline/loss: ", loss.item())
                logger.info("offline/loss: %f", loss.item())

            if idx % 10000 == 0:
                net.eval()
                test_reward = test_model(test_env, net, device=device, episodes=5)
                net.train()
                print(f"Test reward: {test_reward:.2f}")
                common.save_best_model(test_reward, net.state_dict(), save_path, "dqn-qdragger-best", keep_best=10)
                checkpoint = {
                    'frame_idx': frame_idx,
                    'net': net.state_dict(),
                    'tgt_net': tgt_net.target_model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'beta': beta,
                    'best_loss': best_loss
                }
                common.save_checkpoints(frame_idx, checkpoint, save_path, "qdragger", keep_last=5)

        selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params['epsilon_start'])
        epsilon_tracker = common.EpsilonTracker(selector, params['epsilon_start'],
                                                        params['epsilon_final'], params['epsilon_frames'])
        agent = ptan.agent.DQNAgent(net, selector, device=device)
        exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'],
                                                                       steps_count=1)
        # 这里不同，采用的是具备优先级的经验重放缓冲区
        buffer = ptan.experience.ExperienceReplayBuffer(exp_source, params['replay_size'])

        episodic_returns = collections.deque(maxlen=10)
        start_time = time.time()

        while True:
            frame_idx += 1
            buffer.populate(1)
            epsilon_tracker.frame(frame_idx)
            # 根据书中描述，BETA的值需要慢慢随着训练增加到1比较有利于收敛
            beta = min(1.0, BETA_START + frame_idx * (1.0 - BETA_START) / BETA_FRAMES)

            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                episodic_returns.append(new_rewards[0])
                writer.add_scalar("beta", beta, frame_idx)
                if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
                    break

            if len(buffer) < params['replay_initial']:
                continue

            states, actions, rewards, dones, next_states = common.unpack_batch(batch)
            states_v = torch.tensor(states).to(device)
            actions_v = torch.tensor(actions).to(device)
            done_mask = torch.ByteTensor(dones).to(device)
            rewards_v = torch.tensor(rewards).to(device)
            next_states_v = torch.tensor(next_states).to(device)

            optimizer.zero_grad()
            if len(episodic_returns) < 10:
                distill_coeff = 1.0
            else:
                distill_coeff = max(1 - np.mean(episodic_returns) / np.mean(teacher_episodic_returns), 0)

            with torch.no_grad():
                target_max = tgt_net.target_model(next_states_v).max(dim=1)[0]
                td_target = rewards_v + (1 - done_mask) * params['gamma'] * target_max
                teacher_q_values = teacher_model.qvals(states_v) / params['temperature']

            student_q_values = net(states_v)
            old_val = student_q_values.gather(1, actions_v.unsqueeze(1)).squeeze()
            q_loss = F.mse_loss(td_target, old_val)

            student_q_values = student_q_values / params['temperature']
            distill_loss = torch.mean(common.kl_divergence_with_logits(student_q_values, teacher_q_values))

            loss = q_loss + distill_coeff * distill_loss
            if frame_idx % 100 == 0:
                writer.add_scalar("online/loss", loss.item(), frame_idx)
                writer.add_scalar("online/q_loss", q_loss.item(), frame_idx)
                writer.add_scalar("online/distill_loss", distill_loss.item(), frame_idx)
                writer.add_scalar("online/q_values", old_val.mean().item(), frame_idx)
                writer.add_scalar("charts/distill_coeff", distill_coeff, frame_idx)
                writer.add_scalar("charts/SPS", int(frame_idx / (time.time() - start_time)))

                print("SPS:", int(frame_idx / (time.time() - start_time)))
                print("online/loss: ", loss.item())
                logger.info("online/loss: %f", loss.item())

            loss.backward()
            optimizer.step()

            if frame_idx % params['target_net_sync'] == 0:
                tgt_net.sync()
                # Test the model
                test_reward = test_model(test_env, net, device=device, episodes=5)
                print(f"Test reward: {test_reward:.2f}")
                common.save_best_model(test_reward, net.state_dict(), save_path, "qdragger-best", keep_best=10)


                checkpoint = {
                    'frame_idx': frame_idx,
                    'net': net.state_dict(),
                    'tgt_net': tgt_net.target_model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'beta': beta,
                    'best_loss': best_loss
                }
                common.save_checkpoints(frame_idx, checkpoint, save_path, "qdragger-", keep_last=5)
