#!/usr/bin/env python3
'''
完成适配CURL：100%符合参考代码的部分

参考链接：
1. https://github.com/lutery/curl_rainbow

训练记录：
20250501: 训练分非常慢，查明原因，已经开启了cuda
20250502: 原因是在数据采样时，很容易陷入10次重采样，导致计算速度过慢，测试分数0分，训练分数-7.944
20250505: 测试分数-0分，训练分数-10分，停止训练，加入a2c算法验证是否可以训练
'''
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import yaml
import pathlib
import sys
import random
import os

from tensorboardX import SummaryWriter

from lib import model, common

import ale_py

gym.register_envs(ale_py)  # register ALE/Atari environment ids with gymnasium

# C51 distributional-DQN support parameters (module-level defaults).
# NOTE(review): the Trainer below reads its own copies from the config
# (value_min / value_max / atoms), so these constants appear unused here.
Vmax = 10
Vmin = -10
N_ATOMS = 51
DELTA_Z = (Vmax - Vmin) / (N_ATOMS - 1)


class Trainer:
    """Rainbow-DQN + CURL trainer.

    Combines a distributional (C51), noisy, n-step Double-DQN agent with a
    MoCo-style contrastive auxiliary loss (CURL) computed on two random
    augmentations of the sampled states.

    Reference implementation: https://github.com/lutery/curl_rainbow
    """

    def __init__(self, params, device):
        """Store hyper-parameters, create checkpoint dirs, build env + model.

        params: dict of hyper-parameters (config.yaml merged with CLI flags).
        device: torch.device used for all tensors and networks.
        """
        self.params = params
        self.device = device
        self.batch_size = params['batch_size']
        self.replay_frequency = params['replay_frequency']
        self.discount = params['discount_factor']
        self.frame_idx = 0
        self.episodes = 5  # evaluation episodes per eval run
        self.n = params['multi_step']
        self.Vmin = params['value_min']
        self.Vmax = params['value_max']
        self.atoms = params['atoms']
        # width of one bin of the C51 value support
        self.delta_z = (self.Vmax - self.Vmin) / (self.atoms - 1)
        self.coeff = params['coeff']  # weight of the contrastive (CURL) loss
        self.norm_clip = params['gradient_norm_clip']

        self.save_path = os.path.join("saves", "rainbow-curl-kingkong")
        os.makedirs(self.save_path, exist_ok=True)
        self.save_path_buffer = os.path.join("saves", "rainbow-curl-kingkong-buffer")
        os.makedirs(self.save_path_buffer, exist_ok=True)
        self.writer = SummaryWriter(comment="-" + params['run_name'] + "-rainbow-curl")

        self.build_env()
        self.build_model()

    def build_env(self):
        """Create the training and evaluation environments."""
        # FIX: use self.params instead of the module-level global `params`,
        # so the class works when instantiated outside this script.
        self.env = common.wrap_dqn(self.params['game'])
        self.test_env = common.wrap_dqn(self.params['game'])
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.n

    def build_model(self):
        """Build online/momentum/target networks, agent, buffer, optimizer."""
        # FIX: use self.device instead of the module-level global `device`.
        self.net = model.RainbowDQN(self.obs_shape, self.action_shape, self.params).to(self.device)
        # NOTE(review): `requre_grad` (sic) is the keyword accepted by the
        # project's TargetNet wrapper — keep the spelling as-is.
        self.momentum_net = ptan.agent.TargetNet(self.net, requre_grad=False)
        self.tgt_net = ptan.agent.TargetNet(self.net, requre_grad=False)
        print(self.net)

        self.agent = ptan.agent.DQNAgent(lambda x: self.net.qvals(x), ptan.actions.ArgmaxActionSelector(), device=self.device)
        self.exp_source = ptan.experience.ExperienceSource(self.env, self.agent, steps_count=1)
        # TODO: finish adapting the prioritized replay buffer to the reference code
        self.buffer = common.PrioritizedReplayBuffer(self.params, self.params['memory_capacity'], self.exp_source)
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.params['learning_rate'], eps=self.params['adam_epsilon'])
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=200000, gamma=0.9)

    def load_model(self):
        """Restore the latest checkpoint (networks, optimizer, scheduler,
        frame counter) and, if present, the pickled replay buffer."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Pick the checkpoint with the highest epoch number in its name.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            # FIX: map to self.device, not the module-level global `device`.
            checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
            self.net.load_state_dict(checkpoint['net'])
            self.momentum_net.target_model.load_state_dict(checkpoint['momentum_net'])
            self.tgt_net.target_model.load_state_dict(checkpoint['tgt_net'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.frame_idx = checkpoint['frame_idx']
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            print("加载模型成功")
            # Log the restored learning rate(s).
            for param_group in self.optimizer.param_groups:
                print("学习率：", param_group['lr'])

            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path_buffer)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                self.buffer = torch.load(os.path.join(self.save_path_buffer, checkpoints[-1]))
                # Re-attach the experience source (not serialized with the buffer).
                self.buffer.set_exp_source(self.exp_source)
                print("加载buffer成功")

    def save_model(self):
        """Write a rolling checkpoint of all state plus the replay buffer."""
        checkpoint = {
            "net": self.net.state_dict(),
            "momentum_net": self.momentum_net.target_model.state_dict(),
            "tgt_net": self.tgt_net.target_model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "frame_idx": self.frame_idx,
            "scheduler": self.scheduler.state_dict()
        }
        common.save_checkpoints(self.frame_idx, checkpoint, self.save_path, "rainbow-curl", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")

        common.save_checkpoints(self.frame_idx, self.buffer, self.save_path_buffer, "rainbow-curl-buffer", keep_last=5)
        print(f"Saved buffer to {self.save_path_buffer}")

    def __train(self):
        """One optimization step: distributional n-step Double-DQN loss plus
        the CURL contrastive loss on two augmented views of the states."""
        # Sample prioritized transitions
        idxs, states, actions, returns, next_states, nonterminals, weights = self.buffer.sample(self.batch_size)
        # Two independent augmentations (random crop/pad/crop) of the same
        # batch of states: one view feeds the online net (anchor), the other
        # feeds the momentum net (target) for contrastive learning.
        states = torch.tensor(states).to(device=self.device)
        aug_states_1 = common.aug(states).to(device=self.device)
        aug_states_2 = common.aug(states).to(device=self.device)
        actions = torch.tensor(actions).to(device=self.device)
        returns = torch.tensor(returns).to(device=self.device)
        next_states = torch.tensor(next_states).to(device=self.device)
        nonterminals = torch.tensor(nonterminals).to(device=self.device).unsqueeze(-1)
        weights = torch.tensor(weights).to(device=self.device)
        # Atom log-probabilities for the raw (non-augmented) states
        log_ps, _ = self.net(states)  # Log probabilities log p(s_t, ·; θonline)
        # Anchor embedding from the online network
        _, z_anch = self.net(aug_states_1)
        # Target embedding from the momentum network
        _, z_target = self.momentum_net(aug_states_2)
        # Project target embeddings through the learned bilinear matrix W
        # into the space where similarities are measured.
        z_proj = torch.matmul(self.net.W, z_target.T)
        # Pairwise similarities between anchors (rows) and projected targets
        # (columns); the diagonal holds the positive pairs (same state, two
        # augmented views).
        logits = torch.matmul(z_anch, z_proj)
        # Subtract the per-row max for numerical stability of the softmax.
        logits = (logits - torch.max(logits, 1)[0][:, None])
        # Temperature scaling: smaller logits -> smoother softmax distribution.
        logits = logits * 0.1
        # InfoNCE labels: row i's correct match is column i (itself); all
        # other columns in the batch act as negatives.
        labels = torch.arange(logits.shape[0]).long().to(device=self.device)
        # MoCo-style contrastive cross-entropy loss: maximizes similarity of
        # positive pairs, minimizes it for negatives.
        moco_loss = (nn.CrossEntropyLoss()(logits, labels)).to(device=self.device)

        # Log-probabilities of the actions actually taken
        log_ps_a = log_ps[range(self.batch_size), actions]  # log p(s_t, a_t; θonline)

        with torch.no_grad():
            # Online net selects the argmax action in the n-th next state...
            pns, _ = self.net(next_states)  # Probabilities p(s_t+n, ·; θonline)
            dns = self.net.supports.expand_as(pns) * pns  # Distribution d_t+n = (z, p(s_t+n, ·; θonline))
            argmax_indices_ns = dns.sum(2).argmax(1)  # argmax_a[(z, p(s_t+n, a; θonline))]
            # ...and the target net evaluates that action (Double-DQN).
            self.tgt_net.target_model.reset_noise()  # Sample new target net noise
            pns, _ = self.tgt_net.target_model(next_states)  # Probabilities p(s_t+n, ·; θtarget)
            pns_a = pns[range(self.batch_size), argmax_indices_ns]  # Double-Q probabilities

            # Bellman operator applied to the support: Tz = R^n + (γ^n)z,
            # zeroed by `nonterminals` for terminal transitions.
            Tz = returns.unsqueeze(1) + nonterminals * (self.discount ** self.n) * self.net.supports.unsqueeze(0)
            Tz = Tz.clamp(min=self.Vmin, max=self.Vmax)  # Clamp between supported values
            # L2 projection of Tz onto the fixed support z
            b = (Tz - self.Vmin) / self.delta_z  # b = (Tz - Vmin) / Δz
            l, u = b.floor().to(torch.int64), b.ceil().to(torch.int64)
            # Fix disappearing probability mass when l = b = u (b is int)
            l[(u > 0) * (l == u)] -= 1
            u[(l < (self.atoms - 1)) * (l == u)] += 1

            # Distribute the probability mass of Tz over neighbouring atoms
            m = states.new_zeros(self.batch_size, self.atoms)
            offset = torch.linspace(0, ((self.batch_size - 1) * self.atoms), self.batch_size).unsqueeze(1).expand(self.batch_size, self.atoms).to(actions)
            m.view(-1).index_add_(0, (l + offset).view(-1), (pns_a * (u.float() - b)).view(-1))  # m_l = m_l + p(s_t+n, a*)(u - b)
            m.view(-1).index_add_(0, (u + offset).view(-1), (pns_a * (b - l.float())).view(-1))  # m_u = m_u + p(s_t+n, a*)(b - l)

        # Distributional cross-entropy loss: minimises D_KL(m || p(s_t, a_t))
        loss = -torch.sum(m * log_ps_a, 1)
        # Combined objective: distributional loss + weighted contrastive loss
        loss = loss + (moco_loss * self.coeff)
        self.net.zero_grad()
        curl_loss = (weights * loss).mean()
        # FIX: curl_loss is already a scalar; the extra .mean() was redundant.
        curl_loss.backward()  # Backpropagate importance-weighted minibatch loss
        nn.utils.clip_grad_norm_(self.net.parameters(), self.norm_clip)  # Clip gradients by L2 norm
        self.optimizer.step()

        # Refresh the priorities of the sampled transitions
        self.buffer.update_priorities(idxs, loss.detach().cpu().numpy())

    def train_model(self):
        """Main training loop: collect experience, train every
        `replay_frequency` frames, periodically sync target net, checkpoint
        and evaluate. Returns when the reward tracker signals success."""
        self.net.train()
        with common.RewardTracker(self.writer, self.params['stop_reward']) as reward_tracker:
            while True:
                self.frame_idx += 1
                self.buffer.populate(1)

                new_rewards = self.exp_source.pop_total_rewards()
                if new_rewards:
                    if reward_tracker.reward(new_rewards[0], self.frame_idx):
                        break

                # Re-sample noisy-net noise at the replay cadence.
                if self.frame_idx % self.replay_frequency == 0:
                    self.net.reset_noise()

                # Wait until the buffer holds enough initial experience.
                if len(self.buffer) < self.params['replay_initial']:
                    continue

                if self.frame_idx % self.replay_frequency == 0:
                    self.__train()
                    # EMA update of the momentum (CURL) network.
                    self.momentum_net.alpha_sync(0.999)

                    if self.frame_idx % self.params['target_update_steps'] == 0:
                        self.tgt_net.sync()
                        self.save_model()

                if self.frame_idx % self.params['evaluation_interval'] == 0:
                    self.net.eval()
                    self.eval_model()
                    self.net.train()

    def eval_model(self):
        """Run greedy evaluation episodes on the test env, save the best
        model and log mean reward / mean Q-value."""
        with torch.no_grad():
            total_reward = 0.0
            total_q_val = 0.0
            for _ in range(self.episodes):
                noop_action_count = 0
                pre_action = -1
                obs, _ = self.test_env.reset()
                while True:
                    # FIX: use self.device instead of the global `device`.
                    obs_v = ptan.agent.default_states_preprocessor([obs]).to(self.device)
                    logits_v = self.net.qvals(obs_v)
                    logits = logits_v.data.cpu().numpy()
                    action = np.argmax(logits)
                    q_val = logits.max()
                    # Abort an episode stuck in repeated Noop actions.
                    if action == 0 and pre_action == action:  # Noop
                        noop_action_count += 1
                        if noop_action_count > 30:
                            break
                    else:
                        noop_action_count = 0
                    pre_action = action
                    obs, reward, done, trunc, _ = self.test_env.step(action)
                    total_reward += reward
                    total_q_val += q_val
                    if done or trunc:
                        break
        mean_reward, mean_q_val = total_reward / self.episodes, total_q_val / self.episodes
        common.save_best_model(mean_reward, self.net.state_dict(), self.save_path, "rainbow-curl-best", keep_best=10)
        # FIX: pass global_step so TensorBoard plots evaluations over frames
        # instead of stacking every point at the same step.
        self.writer.add_scalar("test_reward", mean_reward, self.frame_idx)
        self.writer.add_scalar("test_q_value", mean_q_val, self.frame_idx)
        print(f"save best model, current test score: {mean_reward}, mean q value: {mean_q_val}")



if __name__ == "__main__":
    # Stage 1: bootstrap options — device selection and which config
    # sections to merge. Unknown flags are kept for the second pass.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    parser.add_argument("-n", "--name", default='pendulum', help="Name of the run")
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Stage 2: read hyper-parameter defaults from config/config.yaml, merge
    # the requested sections, then register each key as a CLI flag so any
    # value can be overridden from the command line.
    config_path = pathlib.Path(sys.argv[0]).parent / 'config/config.yaml'
    configs = yaml.safe_load(config_path.read_text())
    default_params = {}
    for section in args.configs:
        default_params.update(configs[section])
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)

    trainer = Trainer(params=params, device=device)
    trainer.load_model()
    trainer.train_model()
