#!/usr/bin/env python3
'''
Adapted; training runs end-to-end, results pending verification.

Training log:
2025-07-11: trained on machine #2, train score 12.680, test score 182.000
2025-07-12: resumed training, train score 28.960, test score 302
2025-07-13: resumed training, test score 302, train score 40.440
2025-07-14: resumed training, test score 342, train score 91.120
2025-07-15: resumed training, test score 342, train score 110.120
2025-07-16: resumed training, test score 342, train score 147.840
2025-07-17: resumed training, test score 542, train score 171.880
2025-07-18: resumed training, test score 542, train score 173.320; added a learning-rate scheduler
2025-07-19: test score 542, train score 182.040, continuing training
2025-07-20: test score 542, train score 189.680, continuing training
2025-07-20: test score 542, test score no longer improving; training paused, learning rate has decayed to about 0.0002, pending verification
'''
import gymnasium as gym
import ptan
import numpy as np
import argparse
from tensorboardX import SummaryWriter
import os

import torch
import torch.nn as nn
import torch.nn.utils as nn_utils
import torch.nn.functional as F
import torch.optim as optim

from lib import common_a2c as common
from lib import model_a2c as model
import ale_py
import yaml
import pathlib
import sys

# Register the ALE (Atari) environment ids with gymnasium so that
# gym.make("ALE/BattleZone-v5") below can resolve them.
gym.register_envs(ale_py)



class Trainer:
    """A2C trainer for ALE/BattleZone-v5.

    Builds the vectorized training environments, the actor-critic network,
    optimizer and LR scheduler, then runs the A2C loop with periodic
    evaluation and checkpointing. Hyperparameters come from ``params``
    (a dict; missing keys fall back to the defaults below).
    """

    def __init__(self, params, device):
        """
        Args:
            params: hyperparameter dict; must contain 'name' (run name used
                for the save directory and TensorBoard comment).
            device: torch device for the network and agent.
        """
        self.params = params
        self.device = device
        self.learning_rate = params.get('learning_rate', 0.0001)
        self.num_envs = params.get('num_envs', 8)
        self.batch_size = params.get('batch_size', 32)
        self.gamma = params.get('gamma', 0.99)
        self.reward_steps = params.get('reward_steps', 5)
        self.entropy_beta = params.get('entropy_beta', 0.01)
        self.clip_grad = params.get('clip_grad', 0.5)
        self.save_iters = params.get('save_iters', 10000)

        self.save_path = os.path.join("saves", "a2c-conv" + self.params['name'])
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists / os.makedirs pair.
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-a2c-conv_" + self.params['name'])
        self.batch = []
        self.best_reward = 0
        self.frame_idx = 0   # number of optimizer updates performed so far
        self.start_idx = 0   # experience-step offset restored from a checkpoint

        self.__build_env()
        self.__build_model()
        self.__build_buffer()

    def __build_env(self):
        # One evaluation env (episodic_life disabled so a test episode runs to
        # the true game end) plus num_envs identically-wrapped training envs.
        self.test_env = common.wrap_dqn(gym.make("ALE/BattleZone-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
        self.envs = [common.wrap_dqn(gym.make("ALE/BattleZone-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False) for _ in range(self.num_envs)]

    def __build_model(self):
        # FIX: original used the module-level global `device` here, which only
        # worked when the class was constructed from this file's __main__
        # block; use the device passed to the constructor instead.
        self.net = model.AtariA2C(self.envs[0].observation_space.shape, self.envs[0].action_space.n).to(self.device)
        print(self.net)

        self.optimizer = optim.Adam(self.net.parameters(), lr=self.learning_rate, eps=1e-3)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=50000, gamma=0.9)

    def __build_buffer(self):
        # net(x)[0] selects the policy logits head; ptan applies softmax.
        self.agent = ptan.agent.PolicyAgent(lambda x: self.net(x)[0], apply_softmax=True, device=self.device, preprocessor=common.optimized_states_preprocessor)
        self.exp_source = ptan.experience.ExperienceSourceFirstLast(self.envs, self.agent, gamma=self.gamma, steps_count=self.reward_steps)

    def load_model(self):
        """Restore net/optimizer/scheduler state from the newest checkpoint,
        if any exist under ``self.save_path``; otherwise do nothing."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Pick the checkpoint with the highest frame index; filenames are
            # assumed shaped like "<prefix>_epoch_<frame_idx>.<ext>" so field
            # [2] of the '_'-split is the numeric index — TODO confirm against
            # common.save_checkpoints.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)), key=lambda x: int(x.split('_')[2].split('.')[0]))
            checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
            self.frame_idx = checkpoint['frame_idx']
            self.start_idx = checkpoint['start_idx']
            self.net.load_state_dict(checkpoint['net'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            print("加载模型成功")
            # Show the restored learning rate.
            print("Learning Rate:", self.scheduler.get_last_lr()[0])

    def train_model(self):
        """Run the A2C training loop until the reward tracker reports the
        stop reward (22200) or the experience source is exhausted."""
        with common.RewardTracker(self.writer, stop_reward=22200) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=10) as tb_tracker:
                for step_idx, exp in enumerate(self.exp_source):
                    self.batch.append(exp)

                    # Report episode rewards as they complete; tracker returns
                    # True once the stop reward is reached.
                    new_rewards = self.exp_source.pop_total_rewards()
                    if new_rewards:
                        if tracker.reward(new_rewards[0], step_idx + self.start_idx):
                            break

                    if len(self.batch) < self.batch_size:
                        continue

                    states_v, actions_t, vals_ref_v = common.unpack_batch(self.batch, self.net, gamma=self.gamma, reward_steps=self.reward_steps, device=self.device)
                    self.batch.clear()

                    self.optimizer.zero_grad()
                    logits_v, value_v = self.net(states_v)
                    loss_value_v = F.mse_loss(value_v.squeeze(-1), vals_ref_v)

                    log_prob_v = F.log_softmax(logits_v, dim=1)
                    # Advantage uses a detached baseline so the policy loss
                    # does not backprop through the value head.
                    adv_v = vals_ref_v - value_v.squeeze(-1).detach()
                    log_prob_actions_v = adv_v * log_prob_v[range(self.batch_size), actions_t]
                    loss_policy_v = -log_prob_actions_v.mean()

                    prob_v = F.softmax(logits_v, dim=1)
                    entropy_loss_v = self.entropy_beta * (prob_v * log_prob_v).sum(dim=1).mean()

                    # Backprop the policy loss first (retain_graph so the
                    # value/entropy backward below can reuse the graph) purely
                    # to snapshot policy-gradient statistics for TensorBoard.
                    loss_policy_v.backward(retain_graph=True)
                    grads = np.concatenate([p.grad.data.cpu().numpy().flatten()
                                            for p in self.net.parameters()
                                            if p.grad is not None])

                    loss_v = entropy_loss_v + loss_value_v
                    loss_v.backward()
                    nn_utils.clip_grad_norm_(self.net.parameters(), self.clip_grad)
                    self.optimizer.step()
                    # Fold the policy loss back in so loss_total logs the full
                    # objective; this happens after step() on purpose.
                    loss_v += loss_policy_v
                    self.frame_idx += 1
                    self.scheduler.step()

                    self.eval_model(step_idx)

                    tb_tracker.track("advantage",       adv_v, step_idx + self.start_idx)
                    tb_tracker.track("values",          value_v, step_idx + self.start_idx)
                    tb_tracker.track("batch_rewards",   vals_ref_v, step_idx + self.start_idx)
                    tb_tracker.track("loss_entropy",    entropy_loss_v, step_idx + self.start_idx)
                    tb_tracker.track("loss_policy",     loss_policy_v, step_idx + self.start_idx)
                    tb_tracker.track("loss_value",      loss_value_v, step_idx + self.start_idx)
                    tb_tracker.track("loss_total",      loss_v, step_idx + self.start_idx)
                    tb_tracker.track("grad_l2",         np.sqrt(np.mean(np.square(grads))), step_idx + self.start_idx)
                    tb_tracker.track("grad_max",        np.max(np.abs(grads)), step_idx + self.start_idx)
                    tb_tracker.track("grad_var",        np.var(grads), step_idx + self.start_idx)

    def eval_model(self, step_idx):
        """Every 200 updates run a greedy evaluation and keep the best nets;
        every ``save_iters`` updates write a resumable checkpoint."""
        if self.frame_idx % 200 == 0:
            self.net.eval()
            test_reward = Trainer.test_model(self.test_env, self.net, device=self.device, episodes=2)
            self.net.train()
            print(f"Test reward: {test_reward:.2f}")
            common.save_best_model(test_reward, self.net.state_dict(), self.save_path, "a2c-best", keep_best=10)

        if self.frame_idx % self.save_iters == 0:
            checkpoint = {
                "net": self.net.state_dict(),
                "optimizer": self.optimizer.state_dict(),
                "frame_idx": self.frame_idx,
                "start_idx": step_idx + self.start_idx,
                "scheduler": self.scheduler.state_dict()
            }
            common.save_checkpoints(self.frame_idx, checkpoint, self.save_path, "a2c", keep_last=5)

    @staticmethod
    def test_model(env, net, device, episodes=5):
        """Play ``episodes`` greedy (argmax) episodes and return the mean
        total reward. An episode is aborted early if the agent picks the
        no-op action (0) more than 30 consecutive times."""
        with torch.no_grad():
            total_reward = 0.0
            for _ in range(episodes):
                noop_action_count = 0
                pre_action = -1
                obs, _ = env.reset()
                while True:
                    obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
                    logits_v, _ = net(obs_v)
                    probs_v = F.softmax(logits_v, dim=1)
                    probs = probs_v.data.cpu().numpy()
                    action = np.argmax(probs)
                    if action == 0 and pre_action == action:  # repeated no-op
                        noop_action_count += 1
                        if noop_action_count > 30:
                            break
                    else:
                        noop_action_count = 0
                    pre_action = action
                    obs, reward, done, trunc, _ = env.step(action)
                    total_reward += reward
                    if done or trunc:
                        break
        return total_reward / episodes

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # FIX: the original combined action="store_true" with default=True, which
    # made the flag a no-op (CUDA could never be disabled from the CLI).
    # BooleanOptionalAction keeps `--cuda` accepted and the default unchanged,
    # while adding `--no-cuda` to actually turn it off.
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction, help="Enable cuda")
    parser.add_argument("-n", "--name", default="battlezone", help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load defaults from config/config.yaml next to this script; __file__ is
    # more reliable than sys.argv[0] (which can differ under `python -m` etc.).
    configs = yaml.safe_load((pathlib.Path(__file__).parent / 'config/config.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Expose every config key as a CLI option so the command line can override
    # the YAML values; the option's type is inferred from the YAML default.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)
    params['device'] = device

    trainer = Trainer(params=params, device=device)
    trainer.load_model()
    trainer.train_model()