import logging
import os
import numpy as np
import argparse
import warnings
import yaml
from os.path import join as pjoin
import sys
sys.path.append(sys.path[0] + "/..")

import torch
from agent import RLAgent

from helpers.generic import get_experiment_dir

from helpers.setup_logger import setup_logging, log_git_commit
logger = logging.getLogger(__name__)

import gym
import gym_textworld  # Register all textworld environments.
from helpers.generic import get_experiment_dir, dict2list
import textworld


def build_agent(env, config=None):
    """Construct an ``RLAgent`` wired to the environment's word vocabulary.

    Args:
        env: a gym textworld environment whose ``observation_space.id2w``
            maps word ids to words.
        config: agent configuration dict. When omitted, falls back to the
            module-level ``config`` (created in the ``__main__`` block), so
            existing ``build_agent(env)`` call sites keep working.

    Returns:
        An ``RLAgent`` built over the vocabulary, with pretrained weights
        loaded (``load_pretrained=True``).
    """
    if config is None:
        # Original code read the module-global `config` defined under
        # __main__; keep that as an explicit fallback for compatibility.
        config = globals()["config"]

    word_vocab = dict2list(env.observation_space.id2w)
    word2id = {w: i for i, w in enumerate(word_vocab)}

    # Action space for the coin-collector games: movement verbs plus the
    # only interactable object names. Words missing from the vocabulary
    # are skipped silently (they never occur in these games).
    verb_list = ["go", "take"]
    object_name_list = ["east", "west", "north", "south", "coin"]
    verb_map = [word2id[w] for w in verb_list if w in word2id]
    noun_map = [word2id[w] for w in object_name_list if w in word2id]

    return RLAgent(config, word_vocab, verb_map, noun_map, load_pretrained=True)


def agent_play(config, max_episodes=None):
    """Run a pretrained agent on the configured textworld environment.

    Plays episode after episode, printing per-episode reward and step
    counts as each one finishes.

    Args:
        config: experiment configuration; ``config['general']['env_id']``
            names the environment batch to instantiate.
        max_episodes: stop after this many completed episodes. ``None``
            (the default, matching the original behavior) plays forever.
    """
    env_id = gym_textworld.make(config['general']['env_id'])
    env = gym.make(env_id)

    agent = build_agent(env)
    agent.model.eval()

    obs, infos = env.reset()
    agent.reset(infos)
    prev_actions = [""]

    this_rewards = 0
    this_steps = 0
    all_rewards = 0
    all_episodes = 0

    # Inference only: disable autograd so no computation graph is retained
    # between steps (eval() alone does not stop graph construction).
    with torch.no_grad():
        while max_episodes is None or all_episodes < max_episodes:
            input_description, _ = agent.get_game_step_info([obs], [infos], prev_actions)
            # Only the textual commands are used here; verb/noun indices and
            # the state representation are training-time outputs.
            _, _, chosen_strings, _ = agent.generate_one_command(input_description)
            obs, reward, done, infos = env.step(chosen_strings[0])
            prev_actions = chosen_strings

            this_rewards += reward
            this_steps += 1

            if done:
                # Episode finished: reset env and agent state, log totals.
                obs, infos = env.reset()
                agent.reset(infos)
                prev_actions = [""]

                all_rewards += this_rewards
                all_episodes += 1

                # Per-episode progress report (episode index / reward / steps).
                print("当前轮数: {}\t本轮奖赏: {}\t本轮时间步: {}".format(all_episodes, this_rewards, this_steps))

                this_rewards = 0
                this_steps = 0


if __name__ == '__main__':
    # Ensure the checkpoint directory exists before the agent loads/saves
    # models. makedirs(..., exist_ok=True) replaces the original
    # exists()/mkdir() pair, which raced and failed on missing parents.
    for _p in ['saved_models']:
        os.makedirs(_p, exist_ok=True)

    parser = argparse.ArgumentParser(description="train network.")
    parser.add_argument("-c", "--config_dir", default='config_macbook', help="the default config directory")
    parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
    parser.add_argument("-vv", "--very-verbose", help="print out warnings", action="store_true")
    args = parser.parse_args()

    if args.very_verbose:
        # Very-verbose implies verbose and re-enables textworld generation
        # warnings that are suppressed by default.
        args.verbose = args.very_verbose
        warnings.simplefilter("default", textworld.TextworldGenerationWarning)

    # Read the experiment configuration from <config_dir>/config.yaml.
    config_file = pjoin(args.config_dir, 'config.yaml')
    with open(config_file) as reader:
        config = yaml.safe_load(reader)

    # Route logging into the experiment directory derived from the config,
    # and record the current git commit for reproducibility.
    default_logs_path = get_experiment_dir(config)
    setup_logging(default_config_path=pjoin(args.config_dir, 'logging_config.yaml'),
                  default_level=logging.INFO, add_time_stamp=True,
                  default_logs_path=default_logs_path)
    log_git_commit(logger)

    agent_play(config)