#!/usr/bin/env python3
'''
Adaptation to CURL: only the CURL-specific parts were added.

References:
1. https://github.com/lutery/curl_rainbow

Training log:
20250501: test score -5, train score -4.950, keep training
20250502: test score 0, train score -0.667, keep training
20250505: test score 5, train score 1.500, keep training
20250506: test score 5, training anomaly, but score still decent (above 1.5), keep training
20250507: test score 5, train score 0.250, looks poor; stop here and continue (or restart) training in the cloud with tuned hyper-parameters
'''
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import yaml
import pathlib
import sys
import random
import os

from tensorboardX import SummaryWriter

from lib import model, common

import ale_py

gym.register_envs(ale_py)


class Trainer:
    """Rainbow DQN agent with a CURL (MoCo-style) contrastive auxiliary loss.

    Wires together the environment, online/target/momentum networks,
    prioritized replay buffer and optimizer, and drives the training,
    evaluation and checkpointing loop.
    """

    def __init__(self, params, device):
        """
        :param params: dict of hyper-parameters (batch_size, Vmin/Vmax, N_ATOMS,
            BETA_*, coeff, env_name, learning_rate, ...)
        :param device: torch device used for all tensors and networks
        """
        self.params = params
        self.device = device
        self.batch_size = params['batch_size']
        self.frame_idx = 0  # global environment-step counter (resumed from checkpoint)
        self.episodes = 5   # evaluation episodes per eval_model() call
        self.REWARD_STEPS = params['REWARD_STEPS']
        self.Vmin = params['Vmin']
        self.Vmax = params['Vmax']
        self.atoms = params['N_ATOMS']
        self.delta_z = params['DELTA_Z']
        self.beta = params['BETA_START']  # PER importance-sampling exponent, annealed to 1.0
        self.BETA_START = params['BETA_START']
        self.BETA_FRAME = params['BETA_FRAMES']
        self.coeff = params['coeff']      # weight of the CURL contrastive loss term

        self.save_path = os.path.join("saves", "rainbow-curl-klax")
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-" + params['run_name'] + "-rainbow-curl")

        self.build_env()
        self.build_model()

    def build_env(self):
        """Create the training and the separate evaluation environment."""
        # BUG FIX: read self.params instead of the module-level `params`
        # global, so the class also works when constructed outside this script.
        self.env = common.wrap_dqn(self.params['env_name'])
        self.test_env = common.wrap_dqn(self.params['env_name'])
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.n

    def build_model(self):
        """Build networks, agent, experience source, replay buffer and optimizer."""
        # BUG FIX: use self.device / self.params instead of module globals.
        self.net = model.RainbowDQN(self.obs_shape, self.action_shape, self.params).to(self.device)
        # Momentum (key) encoder for CURL and target net for the Bellman backup.
        # NOTE(review): `requre_grad` is not a kwarg of stock ptan.agent.TargetNet;
        # presumably a project fork accepts it — confirm against the installed ptan.
        self.momentum_net = ptan.agent.TargetNet(self.net, requre_grad=False)
        self.tgt_net = ptan.agent.TargetNet(self.net, requre_grad=False)
        print(self.net)

        self.agent = ptan.agent.DQNAgent(lambda x: self.net.qvals(x),
                                         ptan.actions.ArgmaxActionSelector(), device=self.device)
        self.exp_source = ptan.experience.ExperienceSourceFirstLast(
            self.env, self.agent, gamma=self.params['gamma'], steps_count=self.params['REWARD_STEPS'])
        self.buffer = ptan.experience.PrioritizedReplayBuffer(
            self.exp_source, self.params['replay_size'], self.params['PRIO_REPLAY_ALPHA'])
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.params['learning_rate'], eps=1e-3)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=200000, gamma=0.9)

    def load_model(self):
        """Resume full training state from the newest epoch checkpoint, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Pick the checkpoint with the largest frame index encoded in its filename.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                 key=lambda x: int(x.split('_')[-1].split('.')[0]))
            # BUG FIX: the directory may contain only "best" snapshots; avoid
            # IndexError when no epoch checkpoint is present.
            if not checkpoints:
                return
            # BUG FIX: map_location used the module-level `device` global.
            checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]),
                                    map_location=self.device, weights_only=False)
            self.net.load_state_dict(checkpoint['net'])
            self.momentum_net.target_model.load_state_dict(checkpoint['momentum_net'])
            self.tgt_net.target_model.load_state_dict(checkpoint['tgt_net'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.frame_idx = checkpoint['frame_idx']
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            print("加载模型成功")
            # Print the restored learning rate of each param group.
            for param_group in self.optimizer.param_groups:
                print("学习率：", param_group['lr'])

    def save_model(self):
        """Persist all state needed to resume training exactly where it stopped."""
        checkpoint = {
            "net": self.net.state_dict(),
            "momentum_net": self.momentum_net.target_model.state_dict(),
            "tgt_net": self.tgt_net.target_model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "frame_idx": self.frame_idx,
            "scheduler": self.scheduler.state_dict()
        }
        common.save_checkpoints(self.frame_idx, checkpoint, self.save_path, "rainbow-curl", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")

    def calc_loss(self, batch, batch_weights, net, tgt_net, gamma, device="cpu"):
        """Combined categorical (C51) loss and CURL contrastive (InfoNCE) loss.

        :param batch: list of first-last experience transitions
        :param batch_weights: PER importance-sampling weights
        :param net: online network (must expose .both(), .qvals() and .W)
        :param tgt_net: target network model used for the Bellman backup
        :param gamma: discount already raised to REWARD_STEPS
        :param device: device for the tensors built from the batch
        :return: (mean weighted loss, per-sample priorities)
        """
        states, actions, rewards, dones, next_states = common.unpack_batch(batch)
        batch_size = len(batch)

        states_v = torch.tensor(states).to(device).float()
        # Two independent augmentations of the same states form the CURL
        # query / key pair.
        aug_states_1 = common.aug(states_v).to(device=self.device)
        aug_states_2 = common.aug(states_v).to(device=self.device)
        actions_v = torch.tensor(actions).to(device)
        next_states_v = torch.tensor(next_states).to(device)
        batch_weights_v = torch.tensor(batch_weights).to(device)

        # Evaluate current and next states in a single forward pass.
        distr_v, qvals_v, _ = net.both(torch.cat((states_v, next_states_v)))
        # Anchor (query) embedding from the online net, target (key) embedding
        # from the momentum encoder.
        _, _, z_anch = net.both(torch.cat((aug_states_1, next_states_v)))
        _, _, z_target = self.momentum_net.target_model.both(torch.cat((aug_states_2, next_states_v)))

        # InfoNCE logits: positives are on the diagonal.
        z_proj = torch.matmul(net.W, z_target.T)
        logits = torch.matmul(z_anch, z_proj)
        logits = (logits - torch.max(logits, 1)[0][:, None])  # numerical stability
        logits = logits * 0.1  # temperature scaling
        labels = torch.arange(logits.shape[0]).long().to(device=self.device)
        moco_loss = (nn.CrossEntropyLoss()(logits, labels)).to(device=self.device)

        next_qvals_v = qvals_v[batch_size:]
        distr_v = distr_v[:batch_size]

        # Double-DQN: actions picked by the online net, distribution from tgt_net.
        next_actions_v = next_qvals_v.max(1)[1]
        next_distr_v = tgt_net(next_states_v)[0]
        next_best_distr_v = next_distr_v[range(batch_size), next_actions_v.data]
        next_best_distr_v = tgt_net.apply_softmax(next_best_distr_v)
        next_best_distr = next_best_distr_v.data.cpu().numpy()

        dones = dones.astype(np.bool_)

        # Project the target distribution through the Bellman update.
        proj_distr = common.distr_projection(next_best_distr, rewards, dones,
                                             self.Vmin, self.Vmax, self.atoms, gamma)

        # Cross-entropy between projected and predicted distributions.
        state_action_values = distr_v[range(batch_size), actions_v.data]
        state_log_sm_v = F.log_softmax(state_action_values, dim=1)
        proj_distr_v = torch.tensor(proj_distr).to(device)

        loss_v = -state_log_sm_v * proj_distr_v
        loss_v = batch_weights_v * loss_v.sum(dim=1) + moco_loss * self.coeff
        # Small epsilon keeps replay priorities strictly positive.
        return loss_v.mean(), loss_v + 1e-5

    def __train(self):
        """Run one optimization step on a single prioritized batch."""
        self.optimizer.zero_grad()
        # BUG FIX: use self.params / self.device instead of module globals.
        batch, batch_indices, batch_weights = self.buffer.sample(self.params['batch_size'], self.beta)
        loss_v, sample_prios_v = self.calc_loss(batch, batch_weights, self.net, self.tgt_net.target_model,
                                                self.params['gamma'] ** self.REWARD_STEPS, device=self.device)
        loss_v.backward()
        self.optimizer.step()
        # BUG FIX: the scheduler was created, saved and restored but never
        # stepped, so the learning rate never decayed; advance it per update.
        self.scheduler.step()
        # Refresh PER priorities with the new per-sample losses.
        self.buffer.update_priorities(batch_indices, sample_prios_v.data.cpu().numpy())

    def train_model(self):
        """Main loop: collect experience, optimize, sync targets, evaluate."""
        self.net.train()
        with common.RewardTracker(self.writer, self.params['stop_reward']) as reward_tracker:
            while True:
                self.frame_idx += 1
                self.buffer.populate(1)
                # Linearly anneal PER beta from BETA_START to 1.0 over BETA_FRAME frames.
                self.beta = min(1.0, self.BETA_START + self.frame_idx * (1.0 - self.BETA_START) / self.BETA_FRAME)

                new_rewards = self.exp_source.pop_total_rewards()
                if new_rewards:
                    if reward_tracker.reward(new_rewards[0], self.frame_idx):
                        break  # stop_reward reached

                # Wait until the buffer holds enough transitions to train on.
                if len(self.buffer) < self.params['replay_initial']:
                    continue

                self.__train()
                # EMA update of the CURL momentum encoder.
                self.momentum_net.alpha_sync(0.999)

                if self.frame_idx % self.params['target_net_sync'] == 0:
                    self.tgt_net.sync()
                    self.save_model()

                if self.frame_idx % self.params['evaluation_interval'] == 0:
                    self.net.eval()
                    self.eval_model()
                    self.net.train()

    def eval_model(self):
        """Play greedy evaluation episodes on test_env and log the results."""
        with torch.no_grad():
            total_reward = 0.0
            total_q_val = 0.0
            for _ in range(self.episodes):
                noop_action_count = 0
                pre_action = -1
                obs, _ = self.test_env.reset()
                while True:
                    # BUG FIX: use self.device instead of the module global.
                    obs_v = ptan.agent.default_states_preprocessor([obs]).to(self.device)
                    logits_v = self.net.qvals(obs_v)
                    logits = logits_v.data.cpu().numpy()
                    action = np.argmax(logits)
                    q_val = logits.max()
                    # Abort an episode stuck emitting NOOP (action 0) more than
                    # 30 times in a row.
                    if action == 0 and pre_action == action:
                        noop_action_count += 1
                        if noop_action_count > 30:
                            break
                    else:
                        noop_action_count = 0
                    pre_action = action
                    obs, reward, done, trunc, _ = self.test_env.step(action)
                    total_reward += reward
                    total_q_val += q_val
                    if done or trunc:
                        break
        mean_reward, mean_q_val = total_reward / self.episodes, total_q_val / self.episodes
        common.save_best_model(mean_reward, self.net.state_dict(), self.save_path, "rainbow-curl-best", keep_best=10)
        # BUG FIX: pass global_step so TensorBoard plots over training time
        # instead of collapsing every evaluation onto step 0.
        self.writer.add_scalar("test_reward", mean_reward, self.frame_idx)
        self.writer.add_scalar("test_q_value", mean_q_val, self.frame_idx)
        print(f"save best model, current test score: {mean_reward}, mean q value: {mean_q_val}")



if __name__ == "__main__":
    # Parse only the bootstrap flags first; everything else stays in
    # `remaining` so config-derived flags can be parsed in a second pass.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    parser.add_argument("-n", "--name", default='pendulum', help="Name of the run")
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load the named config sections from config/config.yaml (resolved
    # relative to this script) and merge them in the order given on the CLI;
    # later sections override earlier ones.
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/config.yaml').read_text())
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Every merged key becomes a CLI flag (typed from its default value) so
    # any hyper-parameter can be overridden from the command line.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)
    # Width of one atom of the C51 value-distribution support.
    params['DELTA_Z'] = (params['Vmax'] - params['Vmin']) / (params['N_ATOMS'] - 1)

    trainer = Trainer(params=params, device=device)
    trainer.load_model()
    trainer.train_model()

