#!/usr/bin/env python3
'''
Adapted to the Gymnasium API.
'''
import ptan
import gymnasium as gym
import argparse
import numpy as np

from typing import Any

import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import ale_py

gym.register_envs(ale_py)


class FireResetEnv(gym.Wrapper):
    """Press FIRE on reset for environments that need it to start the game.

    Some ALE games sit on a static screen after ``reset()`` until the FIRE
    action is taken; this wrapper performs that bootstrapping automatically.
    """

    def __init__(self, env=None):
        """Wrap *env*; requires action 1 to be FIRE and at least 3 actions."""
        super(FireResetEnv, self).__init__(env)
        # Games that need FIRE expose it as action index 1 and have >= 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Plain pass-through; only reset() is customized.
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset the env and press FIRE (then action 2) to start the episode.

        Returns the latest ``(obs, info)`` pair. If pressing a start action
        ends the episode, the env is reset again and THAT observation is
        returned — the original code returned a stale pre-reset observation
        and ignored the ``truncated`` flag.
        """
        obs, info = self.env.reset(seed=seed, options=options)
        obs, _, terminated, truncated, info = self.env.step(1)
        if terminated or truncated:
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, terminated, truncated, info = self.env.step(2)
        if terminated or truncated:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info
    

def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the standard ptan DQN preprocessing stack to an Atari env.

    Order: optional EpisodicLifeEnv, NoopResetEnv(30), optional FireResetEnv,
    then 84x84 grayscale frames in PyTorch layout, stacked *stack_frames* deep.
    NOTE(review): ``reward_clipping`` is accepted but currently unused.
    """
    if episodic_life:
        # Present each life of a multi-life game as its own episode.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)

    # Randomize the start state with up to 30 no-op actions after reset.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    # Auto-press FIRE on reset when the game requires it to start.
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)

    # Pixel pipeline: resize/grayscale to 84x84, move channels first.
    for pixel_wrapper in (ptan.common.wrappers.ProcessFrame84,
                          ptan.common.wrappers.ImageToPyTorch):
        env = pixel_wrapper(env)

    return ptan.common.wrappers.FrameStack(env, stack_frames)


class AtariA2C(nn.Module):
    """Actor-critic network with a shared convolutional trunk.

    ``forward`` returns ``(policy_logits, state_value)`` for a batch of
    stacked Atari frames of shape ``(N, C, H, W)``.
    """

    def __init__(self, input_shape, n_actions):
        """Build the network for observations of *input_shape* (C, H, W)."""
        super(AtariA2C, self).__init__()

        # Debug trace of the incoming observation shape.
        print("obs_action: ", input_shape)

        # Shared trunk: three conv + batch-norm + ReLU stages.
        in_channels = input_shape[0]
        stages = []
        for out_channels, kernel, stride in ((64, 8, 4), (128, 4, 2), (256, 3, 1)):
            stages.extend([
                nn.Conv2d(in_channels, out_channels, kernel_size=kernel, stride=stride),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(),
            ])
            in_channels = out_channels
        self.conv = nn.Sequential(*stages)

        flat_size = self._get_conv_out(input_shape)

        # Policy head: logits over the n_actions discrete actions.
        self.policy = nn.Sequential(
            nn.Linear(flat_size, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions),
        )

        # Value head: scalar state-value estimate.
        self.value = nn.Sequential(
            nn.Linear(flat_size, 512),
            nn.ReLU(),
            nn.Linear(512, 1),
        )

    def _get_conv_out(self, shape):
        """Return the flattened size of the conv trunk's output for *shape*."""
        dummy = torch.zeros(1, *shape)
        return int(np.prod(self.conv(dummy).size()))

    def forward(self, x):
        """Scale raw byte frames to [0, 1) and run both heads."""
        scaled = x.float() / 256
        flat = self.conv(scaled).flatten(start_dim=1)
        return self.policy(flat), self.value(flat)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True, help="Model file name")
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Play a single rendered episode; episodic_life=False so the full game
    # (all lives) counts as one episode.
    env = wrap_dqn(gym.make("ALE/Amidar-v5", frameskip=4, repeat_action_probability=0.0, render_mode="human"), episodic_life=False)
    net = AtariA2C(env.observation_space.shape, env.action_space.n).to(device)
    if os.path.exists(args.model):
        # SECURITY: weights_only=False unpickles arbitrary objects — only load
        # checkpoints from a trusted source (kept for checkpoint compatibility).
        net.load_state_dict(torch.load(args.model, map_location=device, weights_only=False))
    else:
        print("未加载模型")
    net.eval()

    # Greedy (argmax) action selection for evaluation.
    act_selector = ptan.actions.ArgmaxActionSelector()

    obs, _ = env.reset()
    total_reward = 0.0
    total_steps = 0

    while True:
        obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
        # Inference only — no gradients needed; value head output is unused.
        with torch.no_grad():
            logits_v, _ = net(obs_v)
        # Explicit dim=1: implicit-dim softmax is deprecated and ambiguous.
        probs_v = F.softmax(logits_v, dim=1)
        probs = probs_v.data.cpu().numpy()
        actions = act_selector(probs)
        obs, r, done, trunc, _ = env.step(actions[0])
        if r != 0:
            print("Reward: ", r)
        total_reward += r
        total_steps += 1
        if done or trunc:
            break

    print("Done in %d steps, reward %.2f" % (total_steps, total_reward))
