#!/usr/bin/env python3
'''
已验证，验证不通过
1. 无法完成一局游戏
2. play的过程中容易停止不动好几秒，然后再继续
3. 已经涂过颜色的墙壁依旧会重走
4. 涂墙奖励较低（1、3、6、2等），但是涂成一圈的奖励较高（48），但是游戏中貌似没有尽快的涂成一圈进行

训练记录：
1105：训练分数达到219分，测试分数达到304分，分析一下学习率变化，考虑是否暂时屏蔽学习率调整
1106: 训练分数达到276分，测试分数达到341分，超过linear，应该是训练成功，待play
1211: 由于原先没有继续训练，所以需要重新开启训练继续训练，在cloudsutio上重新训练，训练分数191分，测试分数暂无（由于被之前训练的测试分数放上去了，所以应该暂未升成），继续训练
1212: 学习率： 0.00045，训练分数234分，测试分数351分，继续训练
1213：学习率：0.000328，训练分数419分，测试分数439分，继续训练
1214:学习率：0.000295245，训练分数423分，测试分数487分，继续训练
1215：学习率：0.000215233605,训练分数425分，测试分数494分，继续训练
1216: 学习率：0.0001743，训练分数421分，测试分数494分，判断学习率，训练一天，无进步则停止训练
1217:学习率：0.0001412,训练分数423分，测试分数494分，判断学习率，训练一天，无进步则停止训练
1218：学习率：0.000114，训练分数422分，测试分数494分，无进步，停止训练，play模型
20250218: 完成代码调整，开始重新进行训练
20250219: 开始在2号机上训练，训练分数173.220，测试分数147分，继续训练
20250220:学习率： 0.0005，训练分数183.970，测试分数227分，继续训练
20250221:学习率： 0.0005，训练分数186.070，测试分数227分，继续训练
20250222:学习率： 0.0005，训练分数191.540，测试分数237分，继续训练
20250224:学习率： 0.0005,训练分数211.570，测试分数269，继续训练
20250225:学习率： 0.0005，训练分数214.470，测试分数269，继续训练
20250226:学习率： 0.0005，训练分数226.500，测试分数299，继续训练
20250227:学习率： 0.0005，训练分数229.760，测试分数299，继续训练
20250228:学习率： 0.0005，训练分数235.630，测试分数299，继续训练
20250301:学习率： 0.0005，训练分数241.70，测试分数316分，继续训练 发现预先1218的分数上升比现在快，确认对比代码
20250302:学习率： 0.0005，训练分数237.260，测试分数316分，继续训练，加入调度器
20250303:学习率： 0.0005，测试分数319分，训练分数251.570，继续训练，关闭调度器
20250304:学习率： 0.00045000000000000004，测试分数322分，训练分数未知，继续训练
20250305:学习率： 0.00045000000000000004，测试分数322分，训练分数251.150，继续训练，开放学习率调度器
20250306：学习率： 0.00045000000000000004，测试分数432分，训练分数未知，关闭学习率调度器
20250307:学习率： 0.00040500000000000003，测试分数432，训练分数 343.840，继续训练
20250308:学习率： 0.00040500000000000003，测试分数441，训练分数325.340，继续训练
20250309:学习率： 0.000405000，测试分数451分，训练分数342.320，继续训练
20250310：学习率： 0.0004050，测试分数451分，训练分数334.540，查看曲线，开放调度器，继续训练，对比原先的代码总结不同点
20250311:学习率： 0.0004050，测试分数451分，训练分数343.100，继续训练，查看学习率，考虑是否继续开放调度器
20250312：学习率： 0.0003645
20250312:测试分数451分，训练分数356.060，继续训练，查看曲线和学习率调度器
20250313:学习率： 0.0003645，测试分数451分，训练分数363.100，继续训练
20250314:学习率： 0.0003645，测试分数452分，训练分数357.720，继续训练，关闭学习率调度器
20250315:学习率： 0.0003645，测试分数452分，训练分数，362.140，继续训练
20250316:学习率： 0.0003280，测试分数452分，训练分数360.090，继续训练，开启学习率调度器，查看曲线
20250317:学习率： 0.0002657205，测试分数452分，训练分数357.410，查看学习率，查看曲线，是否需要停止训练，play模型
20250318：继续训练一天，未进步则play模型，学习率： 0.0002657205未变化，测试分数452分，训练分数356.780，查看学习率调度器，停止训练，play模型
20250318：学习率： 0.00023914845,今天无进步则停止训练，play模型，测试分数452，训练分数369.480，关闭学习率调度器，继续训练
20250320：学习率： 0.0002152，测试分数452，训练分数383.450，继续训练，查看学习率调度器
20250321:学习率： 0.00021523，测试分数452.0，训练分数379.960，切换学习率调度器，继续训练
20250322:学习率： 0.000215
20250323:学习率： 0.000215233605，测试分数452，训练分数378.780，开启学习率调度器看看是否继续训练
20250324:学习率： 0.0001937
20250325：学习率： 0.0001937102445，测试分数_452，训练分数376.670，查看学习率调度器是否需要继续训练
20250326：停止训练，play模型
'''
import gymnasium as gym
import ptan
import numpy as np
import argparse
from tensorboardX import SummaryWriter
import os

import torch
import torch.nn as nn
import torch.nn.utils as nn_utils
import torch.nn.functional as F
import torch.optim as optim

from typing import Any
from lib import common
import ale_py

# Register the ALE Atari environment ids ("ALE/...") with gymnasium.
gym.register_envs(ale_py)
GAMMA = 0.99           # discount factor for future rewards
LEARNING_RATE = 5e-4   # initial Adam learning rate (decayed by StepLR in main)
ENTROPY_BETA = 0.01    # weight of the entropy bonus term in the loss
BATCH_SIZE = 128       # transitions accumulated per optimization step
NUM_ENVS = 50          # parallel environments feeding the experience source

REWARD_STEPS = 4       # n in the n-step return bootstrapping
CLIP_GRAD = 0.5        # max gradient L2 norm before the optimizer step

SAVE_ITERS = 100       # write a rolling checkpoint every this many steps


class TransposeObservation(gym.ObservationWrapper):
    """Reorder image observations from HWC (Atari default) to CHW (PyTorch conv layout)."""

    def __init__(self, env=None):
        super(TransposeObservation, self).__init__(env)

    def observation(self, observation):
        # Move the trailing channel axis to the front: (H, W, C) -> (C, H, W).
        return np.moveaxis(observation, 2, 0)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Sanity-check: action index 1 must be FIRE, and such games expose
        # at least 3 actions (NOOP, FIRE, plus movement).
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # After a reset, probe actions 1 and 2 to get past the "press FIRE to
        # start" screen (the exact start button varies per game, assumed to be
        # among the first 3 actions). If a probe accidentally terminates the
        # episode, reset again.
        # BUGFIX: the original returned the stale pre-reset `obs`/`info` when a
        # probe ended the episode; we now return the freshly reset observation.
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info

class AtariA2C(nn.Module):
    def __init__(self, input_shape, n_actions):
        super(AtariA2C, self).__init__()

        # obs_action = (input_shape[2], input_shape[0], input_shape[1])
        print("obs_action: ", input_shape)
        obs_action = input_shape

        self.conv = nn.Sequential(
            nn.Conv2d(obs_action[0], 64, kernel_size=8, stride=4),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )

        conv_out_size = self._get_conv_out(obs_action)
        self.policy = nn.Sequential(
            nn.Linear(conv_out_size, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions)
        )

        self.value = nn.Sequential(
            nn.Linear(conv_out_size, 512),
            nn.ReLU(),
            nn.Linear(512, 1)
        )

    def _get_conv_out(self, shape):
        o = self.conv(torch.zeros(1, *shape))
        return int(np.prod(o.size()))

    def forward(self, x):
        fx = x.float() / 256
        conv_out = self.conv(fx).view(fx.size()[0], -1)
        return self.policy(conv_out), self.value(conv_out)


def unpack_batch(batch, net, device='cpu', gamma=None, reward_steps=None):
    """
    Convert a batch of n-step experience transitions into training tensors.

    :param batch: iterable of experience items with `.state`, `.action`,
        `.reward` (already accumulated over the n steps) and `.last_state`
        (None when the episode terminated within the n steps)
    :param net: network returning (policy_logits, state_values); only the
        value head is used here, to bootstrap non-terminal transitions
    :param device: torch device for the produced tensors
    :param gamma: discount factor; defaults to the module-level GAMMA
    :param reward_steps: number of steps the rewards span; defaults to the
        module-level REWARD_STEPS
    :return: (states tensor, actions tensor, reference value targets tensor)
    """
    if gamma is None:
        gamma = GAMMA
    if reward_steps is None:
        reward_steps = REWARD_STEPS
    states = []
    actions = []
    rewards = []
    not_done_idx = []  # indices (into batch/rewards) whose episode is still running
    last_states = []   # the matching last observations, used for bootstrapping
    for idx, exp in enumerate(batch):
        states.append(np.asarray(exp.state))
        actions.append(int(exp.action))
        rewards.append(exp.reward)
        if exp.last_state is not None:
            not_done_idx.append(idx)
            last_states.append(np.asarray(exp.last_state))
    states_v = torch.FloatTensor(np.asarray(states)).to(device)
    actions_t = torch.LongTensor(actions).to(device)
    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        # Bootstrap: add the discounted critic estimate of the last observed
        # state for every transition whose episode did not terminate.
        last_states_v = torch.FloatTensor(np.asarray(last_states)).to(device)
        last_vals_v = net(last_states_v)[1]
        last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += gamma ** reward_steps * last_vals_np

    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_t, ref_vals_v



@torch.no_grad()
def test_model(env, net, device, episodes=5):
    """Play greedy (argmax) episodes and return the mean accumulated reward.

    An episode is abandoned early when the NOOP action (index 0) is chosen
    more than 30 times in a row, to avoid the agent idling forever.
    """
    reward_sum = 0.0
    for _ in range(episodes):
        obs, _ = env.reset()
        repeated_noops = 0
        last_action = -1
        while True:
            obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
            logits_v, _ = net(obs_v)
            probs = F.softmax(logits_v, dim=1).data.cpu().numpy()
            action = np.argmax(probs)
            if action == 0 and last_action == action:
                # Consecutive NOOPs only; any other action resets the counter.
                repeated_noops += 1
                if repeated_noops > 30:
                    break
            else:
                repeated_noops = 0
            last_action = action
            obs, reward, done, trunc, _ = env.step(action)
            reward_sum += reward
            if done or trunc:
                break
    return reward_sum / episodes


def optimized_states_preprocessor(states):
    """
    Pack a list of environment states into one batched torch tensor.

    The single-state case adds a batch axis directly, skipping the
    per-element copy that the general path performs.

    :param states: list of numpy arrays with states
    :return: torch.Tensor with a leading batch dimension
    """
    if len(states) == 1:
        batch_np = np.expand_dims(states[0], 0)
        return torch.from_numpy(batch_np)
    batch_np = np.asarray([np.asarray(s) for s in states])
    return torch.from_numpy(batch_np)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True combined with action="store_true" means the
    # --cuda flag can never disable CUDA; left as-is since fixing it would
    # change runtime behavior.
    parser.add_argument("--cuda", default=True, action="store_true", help="Enable cuda")
    parser.add_argument("-n", "--name", default="breakout", required=False, help="Name of the run")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    # Checkpoints and best models are stored under saves/a2c-conv-<name>.
    save_path = os.path.join("saves", "a2c-conv-" + args.name)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Parallel training envs plus one separate env for greedy evaluation.
    envs = [common.wrap_dqn(gym.make("ALE/Amidar-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False) for _ in range(NUM_ENVS)]
    test_env = common.wrap_dqn(gym.make("ALE/Amidar-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    writer = SummaryWriter(comment="-a2c-conv_" + args.name)

    net = AtariA2C(envs[0].observation_space.shape, envs[0].action_space.n).to(device)
    print(net)

    # The agent samples actions from softmax(policy logits); the experience
    # source yields n-step (REWARD_STEPS) first/last transitions from all envs.
    agent = ptan.agent.PolicyAgent(lambda x: net(x)[0], apply_softmax=True, device=device, preprocessor=optimized_states_preprocessor)
    exp_source = ptan.experience.ExperienceSourceFirstLast(envs, agent, gamma=GAMMA, steps_count=REWARD_STEPS)
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE, eps=1e-3)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=60000, gamma=0.9)

    batch = []
    best_reward = 0  # NOTE(review): assigned here but never updated or read below
    frame_idx = 0    # optimizer steps performed; restored on resume
    start_idx = 0    # step offset restored from checkpoints, keeps TB x-axis continuous

    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Resume training from the most recent checkpoint, if any.
        # Assumes filenames like "a2c_epoch_<frame_idx>.<ext>" so the third
        # '_'-separated field is the numeric sort key — TODO confirm against
        # common.save_checkpoints.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)), key=lambda x: int(x.split('_')[2].split('.')[0]))
        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            frame_idx = checkpoint['frame_idx']
            net.load_state_dict(checkpoint['net'])
            start_idx = checkpoint['start_idx']
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            print("加载模型成功")
            print("学习率：", optimizer.param_groups[0]['lr'])

    with common.RewardTracker(writer, stop_reward=100000) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
            for step_idx, exp in enumerate(exp_source):
                batch.append(exp)

                # Report finished-episode rewards; tracker.reward returns True
                # once the stop_reward threshold is reached.
                new_rewards = exp_source.pop_total_rewards()
                if new_rewards:
                    if tracker.reward(new_rewards[0], step_idx + start_idx):
                        break

                if len(batch) < BATCH_SIZE:
                    continue


                states_v, actions_t, vals_ref_v = unpack_batch(batch, net, device=device)
                batch.clear()

                optimizer.zero_grad()
                logits_v, value_v = net(states_v)
                # Critic loss: MSE between predicted values and n-step targets.
                loss_value_v = F.mse_loss(value_v.squeeze(-1), vals_ref_v)

                # Actor loss: advantage-weighted log-probability of the actions
                # actually taken; the advantage uses the detached critic output
                # so only the policy head is pushed by this term.
                log_prob_v = F.log_softmax(logits_v, dim=1)
                adv_v = vals_ref_v - value_v.squeeze(-1).detach()
                log_prob_actions_v = adv_v * log_prob_v[range(BATCH_SIZE), actions_t]
                loss_policy_v = -log_prob_actions_v.mean()

                # Entropy bonus (negated entropy, so minimizing encourages exploration).
                prob_v = F.softmax(logits_v, dim=1)
                entropy_loss_v = ENTROPY_BETA * (prob_v * log_prob_v).sum(dim=1).mean()

                # Backprop the policy loss first (retain_graph so the value and
                # entropy losses can reuse the same graph) purely to snapshot
                # its gradient vector for the TensorBoard statistics below.
                loss_policy_v.backward(retain_graph=True)
                grads = np.concatenate([p.grad.data.cpu().numpy().flatten()
                                        for p in net.parameters()
                                        if p.grad is not None])

                # These gradients accumulate on top of the policy-loss gradients,
                # so the optimizer step applies the full combined gradient.
                loss_v = entropy_loss_v + loss_value_v
                loss_v.backward()
                nn_utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
                optimizer.step()
                loss_v += loss_policy_v  # total loss, tracked for logging only
                frame_idx += 1
                scheduler.step()

                if frame_idx % 200 == 0:
                    # Periodic greedy evaluation; keep the top-scoring snapshots.
                    net.eval()
                    test_reward = test_model(test_env, net, device=device, episodes=2)
                    net.train()
                    print(f"Test reward: {test_reward:.2f}")
                    common.save_best_model(test_reward, net.state_dict(), save_path, "a2c-best", keep_best=10)

                if frame_idx % SAVE_ITERS == 0:
                    # Rolling resume checkpoint: model, optimizer and scheduler state.
                    checkpoint = {
                        "net": net.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "frame_idx": frame_idx,
                        "start_idx": start_idx + step_idx,
                        "scheduler": scheduler.state_dict()
                    }
                    common.save_checkpoints(frame_idx, checkpoint, save_path, "a2c", keep_last=5)


                tb_tracker.track("advantage",       adv_v, step_idx + start_idx)
                tb_tracker.track("values",          value_v, step_idx + start_idx)
                tb_tracker.track("batch_rewards",   vals_ref_v, step_idx + start_idx)
                tb_tracker.track("loss_entropy",    entropy_loss_v, step_idx + start_idx)
                tb_tracker.track("loss_policy",     loss_policy_v, step_idx + start_idx)
                tb_tracker.track("loss_value",      loss_value_v, step_idx + start_idx)
                tb_tracker.track("loss_total",      loss_v, step_idx + start_idx)
                tb_tracker.track("grad_l2",         np.sqrt(np.mean(np.square(grads))), step_idx + start_idx)
                tb_tracker.track("grad_max",        np.max(np.abs(grads)), step_idx + start_idx)
                tb_tracker.track("grad_var",        np.var(grads), step_idx + start_idx)