#!/usr/bin/env python3
'''
Play script for a trained A2C agent — adapted (已适配) to the current
gymnasium / ptan APIs.
'''
import ptan
import gymnasium as gym
import argparse
import numpy as np

from lib import common

import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import ale_py

# Register the ALE (Atari Learning Environment) environments with gymnasium
# so that "ALE/..." ids (used below in gym.make) resolve.
gym.register_envs(ale_py)

class AtariA2C(nn.Module):
    """Actor-critic network with a shared MLP trunk over flat observations.

    Given a flat observation vector of length ``obs_size`` (RAM bytes in
    this script), produces raw policy logits over ``n_actions`` discrete
    actions and a scalar state-value estimate.
    """

    def __init__(self, obs_size, n_actions):
        super().__init__()

        # Shared trunk. Kept under the historical attribute name ``conv``
        # (it is actually fully-connected) so that previously saved
        # state_dict checkpoints remain loadable.
        self.conv = nn.Sequential(
            nn.Linear(obs_size, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 768),
            nn.ReLU(),
            nn.Linear(768, 1024),
            nn.ReLU(),
        )

        # Policy head: raw logits over the discrete action set.
        self.policy = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions),
        )

        # Value head: scalar V(s) estimate.
        self.value = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 1),
        )

    def forward(self, x):
        """Return ``(policy_logits, state_value)`` for the batch ``x``."""
        features = self.conv(x.float())
        return self.policy(features), self.value(features)
    
def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply ptan's Atari helper wrappers to *env* and return it.

    NOTE(review): ``stack_frames`` and ``reward_clipping`` are accepted
    for signature compatibility but are not used by this function —
    confirm whether they were meant to add FrameStack / reward-clipping
    wrappers.
    """
    if episodic_life:
        # Treat every life as its own episode boundary.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)

    # Randomize the start state with up to 30 no-op actions on reset.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    needs_fire = 'FIRE' in env.unwrapped.get_action_meanings()
    if needs_fire:
        # Some games stay idle until FIRE is pressed after reset.
        env = ptan.common.wrappers.FireResetEnv(env)
    return env


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True, help="Model file name")
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Human-rendered RAM-observation Amidar; episodic_life disabled so one
    # play-through spans all lives.
    env = wrap_dqn(
        gym.make("ALE/Amidar-v5", frameskip=4, repeat_action_probability=0.0,
                 obs_type="ram", render_mode="human"),
        episodic_life=False,
    )
    net = AtariA2C(env.observation_space.shape[0], env.action_space.n).to(device)
    if os.path.exists(args.model):
        # SECURITY: weights_only=False lets torch.load unpickle arbitrary
        # objects — only load checkpoint files from trusted sources.
        net.load_state_dict(torch.load(args.model, map_location=device, weights_only=False))
    else:
        print("未加载模型")
    # No dropout/batchnorm in AtariA2C today, but switch to eval mode in
    # case the architecture changes.
    net.eval()

    act_selector = ptan.actions.ArgmaxActionSelector()

    obs, _ = env.reset()
    total_reward = 0.0
    total_steps = 0

    try:
        while True:
            obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
            # Inference only: skip building the autograd graph.
            with torch.no_grad():
                logits_v, values_v = net(obs_v)
            # FIX: softmax over the action dimension explicitly — calling
            # F.softmax without dim is deprecated and relies on an
            # implicit default.
            probs_v = F.softmax(logits_v, dim=1)
            probs = probs_v.data.cpu().numpy()
            actions = act_selector(probs)
            obs, r, done, trunc, _ = env.step(actions[0])
            total_reward += r
            total_steps += 1
            if done or trunc:
                break
    finally:
        # FIX: release the renderer / ALE resources even on Ctrl-C.
        env.close()

    print("Done in %d steps, reward %.2f" % (total_steps, total_reward))
