#!/usr/bin/env python3
'''
Adapted (ported to gymnasium); not yet verified.
'''
import os.path

import gymnasium as gym
import argparse

from PIL import Image

import numpy as np
import torch
import torch.nn as nn
import ptan

from typing import Any
import ale_py

gym.register_envs(ale_py)

class AtariA2C(nn.Module):
    """Actor-critic network over flat (RAM) byte observations.

    A shared fully connected trunk feeds two heads: ``policy`` produces
    raw action logits and ``value`` produces a scalar state-value
    estimate.

    NOTE: the shared trunk keeps the attribute name ``conv`` (a leftover
    from a convolutional variant) so previously saved state dicts still
    load without key remapping.
    """

    @staticmethod
    def _head(out_features):
        # 768 -> 512 -> out_features; final Linear emits raw scores.
        return nn.Sequential(
            nn.Linear(768, 512),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(512, out_features),
        )

    def __init__(self, obs_size, n_actions):
        super(AtariA2C, self).__init__()

        # Shared feature trunk: obs_size -> 512 -> 512 -> 768, each
        # Linear followed by ReLU + Dropout(0.2).
        self.conv = nn.Sequential(
            nn.Linear(obs_size, 512),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(512, 768),
            nn.ReLU(),
            nn.Dropout(p=0.2),
        )

        # Actor head: action logits.
        self.policy = self._head(n_actions)
        # Critic head: single state-value estimate.
        self.value = self._head(1)

    def forward(self, x):
        # Observations are bytes in [0, 255]; scale to [0, 1] first.
        features = self.conv(x.float() / 255.0)
        return self.policy(features), self.value(features)
    

class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: charges a small penalty every frame (to
    discourage stalling) and a larger penalty when a life is lost.

    Args:
        env: wrapped gymnasium environment; expected to report the
            remaining life count under ``info['lives']`` (ALE does).
        frame_penalty: reward added on every step (negative = penalty).
        life_loss_penalty: reward added when the life count drops.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so the first loss is detected.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # Fix: frame_penalty was stored but never applied; charge it on
        # every step as the parameter name promises.
        reward += self.frame_penalty

        # Penalize a drop in the life count reported by the env.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info
    

def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply evaluation-time wrappers to an Atari environment.

    The classic DQN preprocessing wrappers are currently disabled; only
    the reward-penalty shaping wrapper is applied. The ``stack_frames``,
    ``episodic_life`` and ``reward_clipping`` arguments are kept for
    interface compatibility but are presently unused.
    """
    # Disabled ptan wrappers, kept here for reference:
    #   ptan.common.wrappers.EpisodicLifeEnv(env)        (if episodic_life)
    #   ptan.common.wrappers.NoopResetEnv(env, noop_max=30)
    #   ptan.common.wrappers.FireResetEnv(env)           (if 'FIRE' action exists)
    return RewardPenaltyWrapper(env)



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", default=r"M:\Projects\python\my_-nqd\learning\CarRacing\saves\a2c-ppo_carracing_linear\a2c-best-935.19999999999.pth", help="Model file to load")
    parser.add_argument("-s", "--save", type=int, help="If specified, save every N-th step as an image")
    args = parser.parse_args()
    device = "cuda" if torch.cuda.is_available() else "cpu"

    env = wrap_dqn(gym.make("ALE/BeamRider-ram-v5", frameskip=1, repeat_action_probability=0.0, obs_type="ram", render_mode="human"), episodic_life=False)

    net = AtariA2C(env.observation_space.shape[0], env.action_space.n).to(device)
    net.eval()
    if os.path.exists(args.model):
        net.load_state_dict(torch.load(args.model))

    obs, _ = env.reset()
    total_reward = 0.0
    total_steps = 0
    while True:
        obs_v = torch.FloatTensor(np.array([obs])).to(device)
        mu_v = net(obs_v)[0].to("cpu")
        action = mu_v.squeeze(dim=0).data.numpy()
        obs, reward, done, trunc, _ = env.step(np.argmax(action))
        done = done or trunc
        total_reward += reward
        total_steps += 1
        if done:
            break
    print("In %d steps we got %.3f reward" % (total_steps, total_reward))
