#!/usr/bin/env python3
'''
Adaptation complete.

Training log (machine #2):
2025-02-02: initial score ~20; training score 2946.665, test score 15847.56 — continuing training, added an LR scheduler.
2025-02-03: paused training to run other scripts for comparison.
2025-02-04: changed the task objective to 'landing'; positive scores appeared mid-training, but the final evening score was negative — test score -448984019.5947374 (11 pm); the model can be tested with play.
'''
import os
import gymnasium as gym
import ptan
import argparse
from game.rocket import Rocket

import logging
from logging.handlers import RotatingFileHandler
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np

from tensorboardX import SummaryWriter
from typing import Any
from lib import model, common
import ale_py

gym.register_envs(ale_py)


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Identity wrapper: return *env* unchanged.

    The ``stack_frames`` / ``episodic_life`` / ``reward_clipping`` arguments
    mirror the signature of Atari-style DQN wrapper factories for interface
    compatibility, but are currently ignored — the Rocket environment is used
    without any frame preprocessing.
    """
    return env


def test_model(env, net, device, episodes=5):
    """Run *episodes* greedy evaluation episodes and return the mean reward.

    Actions are chosen greedily (argmax over the network's logits). An episode
    is aborted early when the Noop action (index 0) repeats more than 30 times
    in a row, so an idling agent cannot stall evaluation forever.

    :param env: gymnasium-style env (``reset()`` -> (obs, info),
        ``step(a)`` -> (obs, reward, done, trunc, info))
    :param net: policy network mapping a batched observation to action logits
    :param device: torch device the network lives on
    :param episodes: number of evaluation episodes to average over
    :return: mean total reward per episode (float)
    """
    total_reward = 0.0
    for _ in range(episodes):
        noop_action_count = 0
        pre_action = -1
        obs, _ = env.reset()
        while True:
            obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
            # Inference only: disable autograd tracking to save memory/compute.
            with torch.no_grad():
                logits_v = net(obs_v)
            # argmax(softmax(logits)) == argmax(logits), so the softmax and
            # the numpy round-trip are unnecessary for greedy selection.
            # Cast to a plain int so env.step gets a Python scalar.
            action = int(torch.argmax(logits_v, dim=1).item())
            if action == 0 and pre_action == action:  # repeated Noop
                noop_action_count += 1
                if noop_action_count > 30:
                    break
            else:
                noop_action_count = 0
            pre_action = action
            obs, reward, done, trunc, _ = env.step(action)
            total_reward += reward
            if done or trunc:
                break
    return total_reward / episodes


def setup_logger(save_path):
    """Create (or return) the module logger writing to ``<save_path>/train.log``.

    Uses a rotating file handler (1 MiB per file, 2 backups). The guard on
    ``logger.handlers`` makes repeated calls idempotent: without it, each call
    would attach another handler and every record would be written multiple
    times to the log file.

    :param save_path: existing directory in which ``train.log`` is created
    :return: configured :class:`logging.Logger` at INFO level
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        handler = RotatingFileHandler(os.path.join(save_path, 'train.log'),
                                      maxBytes=1024 * 1024, backupCount=2)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    return logger


if __name__ == "__main__":
    # Hyperparameter record for this run.
    # NOTE(review): most of these values are NOT read below — the code
    # hard-codes different literals (lr 0.0001 vs 0.00025, batch 128 vs 64,
    # replay warm-up 20000 vs 50000, target sync every 1000 vs 10000 frames,
    # buffer size 3*10**5 vs 10**6). Confirm which set is intended and unify.
    params = {
        'env_name':         "Rocket_Landing",
        'stop_reward':      5000.0,
        'run_name':         'dqn-basic-Rocket-Landing',
        'replay_size':      10 ** 6,
        'replay_initial':   50000,
        'target_net_sync':  10000,
        'epsilon_frames':   10 ** 6,
        'epsilon_start':    1.0,
        'epsilon_final':    0.1,
        'learning_rate':    0.00025,
        'gamma':            0.99,
        'batch_size':       64
    }

    save_path = os.path.join("saves", params['run_name'])
    os.makedirs(save_path, exist_ok=True)
    device = common.select_device()

    # Separate environments for training and periodic evaluation.
    # wrap_dqn is currently a pass-through (no frame preprocessing).
    env = wrap_dqn(Rocket(max_steps=999999999, task='landing'))
    test_env = wrap_dqn(Rocket(max_steps=999999999, task='landing'))


    # TensorBoard writer for training metrics.
    writer = SummaryWriter(comment="rocket-recycle-basic")
    # Online DQN network.
    net = model.DQNModel(env.observation_space.shape[0], env.action_space.n).to(device)

    # Target network: a periodically hard-synced copy of `net` used to compute
    # the bootstrap Q-targets for the online network.
    tgt_net = ptan.agent.TargetNet(net)
    # Epsilon-greedy action selector.
    selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=1.0)
    # Epsilon anneals from 1.0 to 0.1 over 10**6 frames: start with almost
    # pure random exploration (the untrained net has no useful policy yet),
    # then shift towards exploiting the learned policy as training progresses.
    epsilon_tracker = common.EpsilonTracker(selector, 1.0, 0.1, 10**6)
    # Agent that turns network outputs into environment actions.
    agent = ptan.agent.DQNAgent(net, selector, device=device)

    # Experience source yielding 1-step (state, action, reward, last_state)
    # transitions discounted by gamma.
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=0.99, steps_count=1)
    # Replay buffer holding up to 3*10**5 transitions, sampled for training.
    buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=3*10 ** 5)

    # Optimizer plus LR schedule: decay LR by 0.9 every 600k scheduler steps.
    optimizer = optim.Adam(net.parameters(), lr=0.0001)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=600000, gamma=0.9)

    # Total number of environment frames processed so far.
    frame_idx = 0
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Resume from the newest checkpoint, if any.
        # Assumes filenames like '<prefix>_epoch_<frame>.<ext>' so that the
        # frame number is the third '_'-separated field — TODO confirm
        # against common.save_checkpoints.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            net.load_state_dict(checkpoint['net'])
            tgt_net.target_model.load_state_dict(checkpoint['tgt_net'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            frame_idx = checkpoint['frame_idx']
            # Older checkpoints may predate the scheduler; load only if present.
            if "scheduler" in checkpoint:
                scheduler.load_state_dict(checkpoint['scheduler'])
            print("加载模型成功")
            print("学习率：", optimizer.param_groups[0]['lr'])
            print("frame_idx: ", frame_idx)
            print("scheduler last epoch: ", scheduler.last_epoch)

    logger = setup_logger(save_path)
    # Reward tracker: logs episode rewards and signals when to stop training
    # (the threshold here is set absurdly high, so training effectively runs
    # until interrupted).
    with common.RewardTracker(writer, stop_reward=1000000000.0, logger=logger) as reward_tracker:
        while True:
            frame_idx += 1
            # Step the environment once and push the transition into the buffer.
            buffer.populate(1)
            # Anneal epsilon according to the current frame count.
            epsilon_tracker.frame(frame_idx)

            # pop_total_rewards drains finished-episode rewards from the
            # experience source only; the transitions stay in the buffer.
            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                # Report the first finished episode's reward; stop training
                # when the tracker signals the target has been reached.
                if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
                    break

            # Skip optimisation until the buffer holds enough transitions
            # (warm-up of 20000 frames before learning starts).
            if len(buffer) < 20000:
                continue

            # Standard DQN update: sample a minibatch, compute the TD loss
            # against the target network, and take one optimiser step.
            optimizer.zero_grad()
            batch = buffer.sample(128)
            loss_v = common.calc_loss_dqn(batch, net, tgt_net.target_model, gamma=0.99, device=device)
            loss_v.backward()
            optimizer.step()
            scheduler.step()

            # Hard-sync the target network every 1000 frames.
            if frame_idx % 1000 == 0:
                tgt_net.sync()
            if frame_idx % 5000 == 0:
                # Greedy evaluation on the held-out env; keep the best weights.
                test_reward = test_model(test_env, net, device=device, episodes=5)
                print(f"Test reward: {test_reward:.2f}")
                common.save_best_model(test_reward, net.state_dict(), save_path, "dqn-basic-best", keep_best=10)


                # Full training state so a later run can resume exactly here.
                checkpoint = {
                    'net': net.state_dict(),
                    'tgt_net': tgt_net.target_model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'frame_idx': frame_idx,
                    'scheduler': scheduler.state_dict()
                }
                common.save_checkpoints(frame_idx, checkpoint, save_path, "dqn-basic", keep_last=5)