#!/usr/bin/env python3
'''
已验证，验证结论：会直接去吃奖励，偶尔会躲避怪物，但基本很少躲避怪物，主要是吃奖励，也不会攻击怪物拿更高的奖励，勉强算是验证通过，后续可以考虑继续训练实现如何表现的更好（躲避怪物、击杀怪物）

已验证，分数可以达到1000分，理论上是可以进行训练的，有待验证训练好的模型
训练记录：
1029：调整了奖励分数和gray一致后，训练分数最高达到92分（比较稳定，不会有较大的震荡），测试分数最高达到102分，继续训练看看
1030：训练分数达到109分，测试分数达到119，继续训练看看
1031：训练分数达到118分，测试分数达到224，继续训练看看
1101：训练分数达到125分，测试分数达到232，继续训练看看
1102：训练分数达到138分，测试分数达到415分，继续训练
1103：训练分数达到146分，测试分数达到448分，继续训练
以上都没加入超参数调整，后续无提升可先不加入，让图形追上再说
1104： 训练分数达到144分， 测试分数达到458分，继续训练，训练分数稳定，测试分数有所提升，继续训练看看
1105： 训练分数达到158，测试分数保持458分，训练分数有提升，测试分数稳定，可以考虑训练卷积到此标准
'''
import gymnasium as gym
import ptan
import numpy as np
import argparse
from tensorboardX import SummaryWriter
import os

import torch
import torch.nn as nn
import torch.nn.utils as nn_utils
import torch.nn.functional as F
import torch.optim as optim
from collections import deque
import cv2
import ale_py


from lib import common

gym.register_envs(ale_py)  # make the "ALE/*" environment ids resolvable via gym.make
GAMMA = 0.99               # discount factor for returns
LEARNING_RATE = 1e-4       # Adam learning rate
ENTROPY_BETA = 0.01        # weight of the entropy bonus in the total loss
BATCH_SIZE = 128           # transitions per optimization step
NUM_ENVS = 50              # parallel environments feeding the experience source
SAVE_ITERS = 100           # save a resumable checkpoint every N optimizer steps

REWARD_STEPS = 4           # n-step rollout horizon for the value target
CLIP_GRAD = 0.5            # global gradient-norm clipping threshold

class AtariA2C(nn.Module):
    """Actor-critic MLP operating on flat (RAM) observations.

    A shared fully-connected trunk feeds two heads: ``policy`` emits
    per-action logits and ``value`` emits a scalar state-value estimate.
    The trunk keeps its historical attribute name ``conv`` so that
    previously saved state_dicts continue to load.
    """

    def __init__(self, obs_size, n_actions):
        super().__init__()

        # Shared feature trunk (attribute name "conv" kept for
        # checkpoint/state_dict backward compatibility).
        self.conv = nn.Sequential(
            nn.Linear(obs_size, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 768),
            nn.ReLU(),
            nn.Linear(768, 1024),  # New layer
            nn.ReLU(),
        )

        # Actor head: raw action logits (softmax applied by callers).
        self.policy = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions),
        )

        # Critic head: scalar V(s).
        self.value = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 1),
        )

    def forward(self, x):
        """Return ``(action_logits, state_value)`` for a batch of observations."""
        features = self.conv(x.float())
        return self.policy(features), self.value(features)
    

class AlienPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper for ALE/Alien.

    Positive raw game scores are compressed into small discrete buckets
    by :meth:`clip_reward`; non-positive rewards pass through unchanged.
    Per-frame and life-loss penalties exist as constructor parameters but
    are currently disabled in :meth:`step`.
    """

    # (upper bound, shaped reward) pairs for positive raw scores, checked
    # in ascending order; anything above the last bound maps to 7.0.
    _BUCKETS = ((11, 1.0), (101, 2.0), (501, 3.0),
                (1001, 4.0), (2001, 5.0), (3001, 6.0))

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super().__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        """Reset the env and remember the starting life count."""
        obs, info = self.env.reset(**kwargs)
        # Baseline for the (currently disabled) life-loss penalty.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        """Step the env, returning the bucketed reward.

        NOTE(review): the per-frame penalty and life-loss penalty are
        intentionally disabled; re-enable by adding ``self.frame_penalty``
        and comparing ``info['lives']`` against ``self.previous_lives``.
        """
        obs, reward, done, truncated, info = self.env.step(action)
        shaped = self.clip_reward(reward=reward)
        return obs, shaped, done, truncated, info

    def clip_reward(self, reward):
        """Map a positive raw score into a small bucketed float32 reward."""
        if reward <= 0:
            return reward
        for bound, shaped in self._BUCKETS:
            if reward <= bound:
                return np.float32(shaped)
        return np.float32(7.0)


def unpack_batch(batch, net, device='cpu'):
    """
    Convert a batch of n-step transitions into training tensors.

    :param batch: iterable of experience items exposing ``.state``,
        ``.action``, ``.reward`` and ``.last_state`` (``None`` when the
        episode terminated inside the n-step rollout)
    :param net: actor-critic network; ``net(states)[1]`` must return V(s)
        with shape ``(batch, 1)``
    :param device: torch device for the produced tensors
    :return: (states tensor, actions tensor, n-step reference values tensor)
    """
    states = []
    actions = []
    rewards = []
    # Indices (into batch/states/actions/rewards) of transitions whose
    # episode did NOT end inside the rollout — only these get a
    # bootstrapped value added to their target.
    not_done_idx = []
    # Final observed state of each non-terminal transition, used to
    # bootstrap the value target.
    last_states = []
    for idx, exp in enumerate(batch):
        # np.asarray replaces np.array(..., copy=False), which raises
        # ValueError under NumPy >= 2.0 whenever a copy is required.
        states.append(np.asarray(exp.state))
        actions.append(int(exp.action))
        rewards.append(exp.reward)
        if exp.last_state is not None:
            not_done_idx.append(idx)
            last_states.append(np.asarray(exp.last_state))
    states_v = torch.FloatTensor(np.asarray(states)).to(device)
    actions_t = torch.LongTensor(actions).to(device)
    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        # Bootstrap: target = accumulated n-step reward
        #                     + gamma^N * V(last_state).
        last_states_v = torch.FloatTensor(np.asarray(last_states)).to(device)
        last_vals_v = net(last_states_v)[1]
        last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += GAMMA ** REWARD_STEPS * last_vals_np

    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_t, ref_vals_v

def test_model(env, net, device, episodes=5):
    """Play ``episodes`` greedy episodes and return the mean total reward.

    Actions are chosen as argmax over the policy head's softmax, i.e.
    deterministic greedy evaluation (no sampling).
    """
    reward_sum = 0.0
    for _ in range(episodes):
        obs, _ = env.reset()
        done = trunc = False
        while not (done or trunc):
            obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
            logits_v, _ = net(obs_v)
            probs = F.softmax(logits_v, dim=1).data.cpu().numpy()
            action = np.argmax(probs)
            obs, reward, done, trunc, _ = env.step(action)
            reward_sum += reward
    return reward_sum / episodes


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): action="store_true" combined with default=True means the
    # flag can never turn CUDA off from the command line — it is always True.
    parser.add_argument("--cuda", default=True, action="store_true", help="Enable cuda")
    parser.add_argument("-n", "--name", default="ppo_carracing_linear", required=False, help="Name of the run")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    save_path = os.path.join("saves", "a2c-linear-" + args.name)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # RAM observations (flat byte vector) rather than pixels; sticky actions
    # disabled via repeat_action_probability=0.0.
    envs = [AlienPenaltyWrapper(gym.make("ALE/Alien-v5", repeat_action_probability=0.0, obs_type="ram")) for _ in range(NUM_ENVS)]
    test_env = AlienPenaltyWrapper(gym.make("ALE/Alien-v5", repeat_action_probability=0.0, obs_type="ram"))
    writer = SummaryWriter(comment="-alien-a2c-linear-" + args.name)

    net = AtariA2C(envs[0].observation_space.shape[0], envs[0].action_space.n).to(device)
    print(net)

    # PolicyAgent samples actions from softmax(policy logits).
    agent = ptan.agent.PolicyAgent(lambda x: net(x)[0], apply_softmax=True, device=device)
    exp_source = ptan.experience.ExperienceSourceFirstLast(envs, agent, gamma=GAMMA, steps_count=REWARD_STEPS)
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE, eps=1e-3)

    start_idx = 0
    # Resume from the newest checkpoint, if any exist under save_path.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Checkpoint filenames embed the step count as the third
        # underscore-separated field; sort numerically and take the latest.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        net.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_idx = checkpoint['start_idx']
        print("加载模型成功")

    batch = []
    best_reward = 0

    with common.RewardTracker(writer, stop_reward=1000) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
            for step_idx, exp in enumerate(exp_source):
                batch.append(exp)

                # Report finished-episode rewards; tracker.reward returns
                # True once stop_reward is reached, ending training.
                new_rewards = exp_source.pop_total_rewards()
                if new_rewards:
                    if tracker.reward(new_rewards[0], step_idx + start_idx):
                        break

                if len(batch) < BATCH_SIZE:
                    continue

                states_v, actions_t, vals_ref_v = unpack_batch(batch, net, device=device)
                batch.clear()

                optimizer.zero_grad()
                logits_v, value_v = net(states_v)
                # Critic loss: MSE between V(s) and the n-step bootstrap target.
                loss_value_v = F.mse_loss(value_v.squeeze(-1), vals_ref_v)

                log_prob_v = F.log_softmax(logits_v, dim=1)
                # Detach the baseline so the policy loss does not
                # backpropagate through the value head.
                adv_v = vals_ref_v - value_v.squeeze(-1).detach()
                log_prob_actions_v = adv_v * log_prob_v[range(BATCH_SIZE), actions_t]
                loss_policy_v = -log_prob_actions_v.mean()

                prob_v = F.softmax(logits_v, dim=1)
                # Entropy bonus (sum p*log p is negative entropy) weighted
                # by ENTROPY_BETA to encourage exploration.
                entropy_loss_v = ENTROPY_BETA * (prob_v * log_prob_v).sum(dim=1).mean()

                # Backprop the policy loss first (retain_graph so the other
                # losses can reuse the graph) purely to snapshot the
                # policy-gradient statistics for TensorBoard.
                loss_policy_v.backward(retain_graph=True)
                grads = np.concatenate([p.grad.data.cpu().numpy().flatten()
                                        for p in net.parameters()
                                        if p.grad is not None])

                # Entropy + value gradients accumulate on top of the policy
                # gradients already stored in p.grad.
                loss_v = entropy_loss_v + loss_value_v
                loss_v.backward()
                nn_utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
                optimizer.step()
                start_idx += 1
                # For logging only: make loss_v the full combined loss.
                loss_v += loss_policy_v

                if start_idx % 200 == 0:
                    # Periodic greedy evaluation; keep the best-scoring weights.
                    test_reward = test_model(test_env, net, device=device, episodes=5)
                    print(f"Test reward: {test_reward:.2f}")
                    common.save_best_model(test_reward, net.state_dict(), save_path, "a2c-gray-best", keep_best=10)

                if start_idx % SAVE_ITERS == 0:
                    # Resumable checkpoint (weights + optimizer + step counter).
                    checkpoint = {
                        "net": net.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "start_idx": start_idx
                    }
                    common.save_checkpoints(start_idx + step_idx, checkpoint, save_path, "acktr", keep_last=5)

                tb_tracker.track("advantage",       adv_v, step_idx + start_idx)
                tb_tracker.track("values",          value_v, step_idx + start_idx)
                tb_tracker.track("batch_rewards",   vals_ref_v, step_idx + start_idx)
                tb_tracker.track("loss_entropy",    entropy_loss_v, step_idx + start_idx)
                tb_tracker.track("loss_policy",     loss_policy_v, step_idx + start_idx)
                tb_tracker.track("loss_value",      loss_value_v, step_idx + start_idx)
                tb_tracker.track("loss_total",      loss_v, step_idx + start_idx)
                tb_tracker.track("grad_l2",         np.sqrt(np.mean(np.square(grads))), step_idx + start_idx)
                tb_tracker.track("grad_max",        np.max(np.abs(grads)), step_idx + start_idx)
                tb_tracker.track("grad_var",        np.var(grads), step_idx + start_idx)