from absl import logging, flags, app
from environment.GoEnv import Go
import time, os
import numpy as np
from algorimths.policy_gradient import PolicyGradient
from algorimths.dqn import DQN
import tensorflow as tf

FLAGS = flags.FLAGS

flags.DEFINE_integer("num_train_episodes", 100,
                     "Number of training episodes for each base policy.")
flags.DEFINE_integer("num_eval", 50,
                     "Number of evaluation episodes")
flags.DEFINE_integer("eval_every", 2000,
                     "Episode frequency at which the agents are evaluated.")
flags.DEFINE_integer("learn_every", 128,
                     "Episode frequency at which the agents learn.")
# Layer sizes are declared with DEFINE_list; elements are normalized with
# int() where they are consumed in main().
flags.DEFINE_list("hidden_layers_sizes_rollout", [128, 128],
                  "Number of hidden units in the rollout net")
flags.DEFINE_list("hidden_layers_sizes_policy", [128, 128],
                  "Number of hidden units in the policy net")
flags.DEFINE_list("hidden_layers_sizes_value", [128, 256],
                  "Number of hidden units in the value net")

flags.DEFINE_integer("replay_buffer_capacity", int(5e4),
                     "Size of the replay buffer.")
flags.DEFINE_integer("reservoir_buffer_capacity", int(2e6),
                     "Size of the reservoir buffer.")


def main(unused_argv):
    """Evaluate an MCTS agent against a uniform-random agent on mini Go.

    Builds three pretrained networks (policy, rollout, value), restores each
    from its newest checkpoint under ./saved_model, plays FLAGS.num_eval
    games, prints each game's score from player 0's perspective, and finally
    prints how many games the MCTS player won.
    """
    begin = time.time()  # NOTE(review): never read afterwards — elapsed time is not reported
    env = Go()
    info_state_size = env.state_size
    num_actions = env.action_size

    # DEFINE_list flag values may arrive as strings; normalize to ints.
    hidden_layers_sizes_policy = [int(l) for l in FLAGS.hidden_layers_sizes_policy]
    hidden_layers_sizes_rollout = [int(l) for l in FLAGS.hidden_layers_sizes_rollout]
    hidden_layers_sizes_value = [int(l) for l in FLAGS.hidden_layers_sizes_value]

    # Hyper-parameters for the DQN-based policy / rollout networks.
    kwargs_dqn = {
        "replay_buffer_capacity": FLAGS.replay_buffer_capacity,
        "epsilon_decay_duration": int(0.6 * FLAGS.num_train_episodes),
        "epsilon_start": 0.8,
        "epsilon_end": 0.001,
        "learning_rate": 1e-3,
        "learn_every": FLAGS.learn_every,
        "batch_size": 128,
        "max_global_gradient_norm": 10,
    }
    # Hyper-parameters for the policy-gradient value network.
    kwargs_pg = {
        "pi_learning_rate": 1e-2,
        "critic_learning_rate": 1e-1,
        "batch_size": 128,
        "entropy_cost": 0.5,
        "max_global_gradient_norm": 20,
    }
    import agent.agent as agent

    # One tf.Graph + tf.Session per network so the three checkpoints can be
    # restored independently without variable-name collisions.
    graph1 = tf.Graph()  # policy net
    graph2 = tf.Graph()  # rollout net
    graph3 = tf.Graph()  # value net

    # Pick the lexicographically-latest checkpoint directory for each net.
    restore_path_policy = "./saved_model/policy_dqn/"+sorted(os.listdir("./saved_model/policy_dqn"))[-1]+"/model.ckpt"
    restore_path_rollout = "./saved_model/rollout_dqn/model.ckpt"
    restore_path_value = "./saved_model/policy_a2c/"+sorted(os.listdir("./saved_model/policy_a2c"))[-1]+"/model.ckpt"

    with graph1.as_default():
        sess1 = tf.Session(graph=graph1)
        policy_net = DQN(sess1, 0, info_state_size, num_actions, hidden_layers_sizes_policy, **kwargs_dqn)
        # BUG FIX: initialize *before* restoring. The original ran the
        # initializer after restore(), overwriting the loaded weights
        # with fresh random values.
        sess1.run(tf.global_variables_initializer())
        policy_net.restore(restore_path_policy)

    with graph2.as_default():
        sess2 = tf.Session(graph=graph2)
        # BUG FIX: use the rollout layer sizes here — the original passed
        # hidden_layers_sizes_policy, silently ignoring the
        # --hidden_layers_sizes_rollout flag (defaults are identical, so
        # behavior is unchanged under the default settings).
        rollout_net = DQN(sess2, 0, info_state_size, num_actions, hidden_layers_sizes_rollout, **kwargs_dqn)
        sess2.run(tf.global_variables_initializer())
        rollout_net.restore(restore_path_rollout)

    with graph3.as_default():
        sess3 = tf.Session(graph=graph3)
        value_net = PolicyGradient(sess3, 0, info_state_size, num_actions, hidden_layers_sizes_value, **kwargs_pg)
        sess3.run(tf.global_variables_initializer())
        value_net.restore(restore_path_value)

    # NOTE(review): the three restored networks are never handed to the MCTS
    # agent — it is constructed with None for all three. Presumably it should
    # receive (policy_net, rollout_net, value_net); confirm MCTSAgent's
    # signature before wiring them in.
    MCTS_RL = agent.MCTSAgent(0, None, None, None,
                    0.5, 50, 50)
    random_agent = agent.RandomAgent(1)

    # Player 0 is the MCTS agent, player 1 the random baseline.
    agents = [MCTS_RL, random_agent]

    # Play the evaluation games, collecting player 0's final reward per game.
    ret = []
    for ep in range(FLAGS.num_eval):
        print("episode", ep, ": ", end="")
        time_step = env.reset()
        while not time_step.last():
            player_id = time_step.observations["current_player"]
            agent_output = agents[player_id].step(time_step, env)
            time_step = env.step(agent_output.action)
        print("score: ", time_step.rewards[0])
        ret.append(time_step.rewards[0])

    # Assuming +1 (win) / -1 (loss) rewards: with s = W - L and n = W + L,
    # s + (n - s) / 2 == W, the number of games the MCTS player won.
    total = np.sum(ret)
    print("during {} tests, MCTS with networks won {} games!".format(
        FLAGS.num_eval, total + (FLAGS.num_eval - total) / 2))

# Script entry point: app.run parses the absl flags, then invokes main(argv).
if __name__ == '__main__':
    app.run(main)
