"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""

import torch
from .env import create_train_env
from lib import common
from .model import PPO
import torch.nn.functional as F
from collections import deque


def eval(opt, global_model, obs_size, num_actions, device, save_path):
    """Continuously evaluate the global PPO model on ALE/Asterix-v5.

    Runs greedy (argmax) rollouts with a local copy of the model. At every
    episode boundary the local weights are re-synced from ``global_model``
    and the current running-average reward is checkpointed through
    ``common.save_best_model``.

    Args:
        opt: options namespace; must provide ``max_actions`` (size of the
            repeated-action window used as a stuck detector) and
            ``num_global_steps`` (per-episode step cap).
        global_model: shared PPO model being trained elsewhere.
        obs_size: observation size used to construct the local PPO net.
        num_actions: size of the discrete action space.
        device: torch device the local model and state tensors live on.
        save_path: directory passed to ``common.save_best_model``.

    Note:
        This function loops forever and never returns.
    """
    torch.manual_seed(123)
    # env = create_train_env("ALE/Asterix-v5", render_mode="human")
    env = create_train_env("ALE/Asterix-v5")
    local_model = PPO(obs_size, num_actions)
    local_model = local_model.to(device)
    local_model.eval()
    obs, info = env.reset()
    state = torch.from_numpy(obs).unsqueeze(0).to(device)
    done = True  # forces an initial weight sync from global_model below
    curr_step = 0
    # Sliding window of recent actions: if it fills up with one repeated
    # action, the agent is considered stuck and the episode is cut short.
    actions = deque(maxlen=opt.max_actions)
    total_reward = 0
    total_steps = 1  # starts at 1 so the first average never divides by zero
    best_reward = 0
    while True:
        curr_step += 1
        if done:
            # NOTE(review): total_reward/total_steps is a running average
            # over ALL episodes so far — the counters are never reset per
            # episode. Confirm this cumulative metric is intended.
            current_reward = total_reward / total_steps
            if current_reward > best_reward:
                best_reward = current_reward
                print("new best reward: {:.2f} at step {} ".format(best_reward, curr_step))
            common.save_best_model(current_reward, local_model.state_dict(), save_path, "ppo-third-best", keep_best=10)

            # Pull the latest weights from the trainer before the next episode.
            local_model.load_state_dict(global_model.state_dict())
        # Pure inference: no_grad avoids building an autograd graph on every
        # step (the original tracked gradients needlessly despite eval()).
        with torch.no_grad():
            logits, value = local_model(state)
            policy = F.softmax(logits, dim=1)
        action = torch.argmax(policy).item()  # greedy action selection
        state, reward, done, trunc, info = env.step(action)
        done = done or trunc  # gymnasium API: treat truncation as episode end
        total_reward += reward
        total_steps += 1

        # Uncomment following lines if you want to save model whenever level is completed
        # if info["flag_get"]:
        #     print("Finished")
        #     torch.save(local_model.state_dict(),
        #                "{}/ppo_super_mario_bros_{}_{}_{}".format(opt.saved_path, opt.world, opt.stage, curr_step))

        env.render()
        actions.append(action)
        # End the episode on the step cap, or when the action window is
        # saturated by a single repeated action (stuck detector).
        if curr_step > opt.num_global_steps or actions.count(actions[0]) == actions.maxlen:
            done = True
        if done:
            curr_step = 0
            actions.clear()
            state, _ = env.reset()
        state = torch.from_numpy(state).unsqueeze(0)
        state = state.to(device)