from absl import logging, flags, app
from environment.GoEnv import Go
import time, os
import numpy as np
# NOTE(review): "algorimths" looks like a typo of "algorithms", but it has to
# match the real package directory name on disk -- confirm before renaming.
from algorimths.policy_gradient import PolicyGradient
import tensorflow as tf
# Suppress TensorFlow C++ INFO/WARNING log output ("2" = errors only).
# NOTE(review): this is set *after* `import tensorflow`, so messages emitted
# during the import itself are not suppressed -- consider moving it above.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

FLAGS = flags.FLAGS

# Command-line flags controlling the self-play training/evaluation schedule.
flags.DEFINE_integer("num_train_episodes", 20000,
                     "Number of training episodes for each base policy.")
flags.DEFINE_integer("num_eval", 1000,
                     "Number of evaluation episodes")
flags.DEFINE_integer("eval_every", 2000,
                     "Episode frequency at which the agents are evaluated.")
flags.DEFINE_integer("learn_every", 128,
                     "Episode frequency at which the agents learn.")
# Default is a list of ints; absl list flags normally carry strings, but
# main() converts every element with int(), so both forms work.
flags.DEFINE_list("hidden_layers_sizes", [
    128, 256
], "Number of hidden units in the policy-net and critic-net.")



def main(unused_argv):
    """Run one self-play A2C training iteration on Go and save an improved model.

    Restores the latest checkpoint under ./saved_model/policy_a2c/ into two
    independent TF graphs (a learner and a frozen rival), trains the learner
    by self-play for FLAGS.num_train_episodes episodes, evaluates it for
    FLAGS.num_eval episodes, and saves a new versioned checkpoint when the
    learner's mean evaluation reward is positive.

    Args:
        unused_argv: Leftover positional CLI args from absl.app (ignored).
    """
    begin = time.time()
    env = Go()
    info_state_size = env.state_size
    num_actions = env.action_size

    # Network-architecture hyper-parameters and optimizer settings.
    hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
    kwargs = {
        "pi_learning_rate": 1e-2,
        "critic_learning_rate": 1e-1,
        "batch_size": 128,
        "entropy_cost": 0.5,
        "max_global_gradient_norm": 20,
    }
    import agent.agent as agent  # noqa: F401 -- kept from original; presumably needed for side effects
    ret1 = [0]  # rolling window of player-0 (learner) episode rewards
    ret2 = [0]  # rolling window of player-1 (rival) episode rewards
    max_len = 2000  # capacity of the rolling reward windows

    # One graph + session per player so the two networks stay independent.
    graph1 = tf.Graph()
    graph2 = tf.Graph()
    # Sort checkpoint directories numerically by the integer after the last
    # '-' (e.g. "a2c-10" > "a2c-9"). The previous lexicographic sort combined
    # with taking the last *character* of the name broke at version >= 10.
    model_root = "./saved_model/policy_a2c/"
    checkpoints = sorted(os.listdir(model_root),
                         key=lambda name: int(name.rsplit("-", 1)[-1]))
    version = int(checkpoints[-1].rsplit("-", 1)[-1])
    restore_path = model_root + checkpoints[-1] + "/model.ckpt"

    with graph1.as_default():
        sess1 = tf.Session(graph=graph1)
        this = PolicyGradient(sess1, 0, info_state_size, num_actions, hidden_layers_sizes, **kwargs)
        sess1.run(tf.global_variables_initializer())
        this.restore(restore_path)

    with graph2.as_default():
        sess2 = tf.Session(graph=graph2)
        rival = PolicyGradient(sess2, 1, info_state_size, num_actions, hidden_layers_sizes, **kwargs)
        sess2.run(tf.global_variables_initializer())
        rival.restore(restore_path)

    agents = [this, rival]

    # Self-play training: only agent 0 (the learner) updates its weights; the
    # rival always acts in evaluation mode, i.e. with a frozen policy.
    for ep in range(FLAGS.num_train_episodes):
        if (ep + 1) % FLAGS.eval_every == 0:
            losses1 = agents[0].loss
            logging.info("Episodes: {}: Losses: {}, Rewards: {}".format(ep+1, losses1, np.mean(ret1)))
            with open('log_pg_{}'.format(os.environ.get('BOARD_SIZE')), 'a+') as log_file:
                log_file.writelines("{}, {}\n".format(ep+1, np.mean(ret1)))
        time_step = env.reset()  # a go.Position object
        while not time_step.last():
            player_id = time_step.observations["current_player"]
            if player_id == 0:
                with graph1.as_default():
                    agent_output = agents[player_id].step(time_step)
            else:
                with graph2.as_default():
                    # Fixed: the original passed a positional argument after a
                    # keyword argument (a SyntaxError). The rival only needs
                    # evaluation mode here.
                    agent_output = agents[player_id].step(time_step, is_evaluation=True)

            action_list = agent_output.action
            time_step = env.step(action_list)
        # Episode over: step both agents once more with the terminal state so
        # the learner can record the final reward.
        with graph1.as_default():
            agents[0].step(time_step)
        with graph2.as_default():
            agents[1].step(time_step)
        if len(ret1) < max_len:
            ret1.append(time_step.rewards[0])
            ret2.append(time_step.rewards[1])
        else:
            ret1[ep % max_len] = time_step.rewards[0]
            ret2[ep % max_len] = time_step.rewards[1]

    # Evaluation: both agents act in evaluation mode (no learning).
    ret = []
    for ep in range(FLAGS.num_eval):
        time_step = env.reset()
        while not time_step.last():
            player_id = time_step.observations["current_player"]
            if player_id == 0:
                with graph1.as_default():
                    agent_output = agents[player_id].step(time_step, is_evaluation=True)
            else:
                with graph2.as_default():
                    agent_output = agents[player_id].step(time_step, is_evaluation=True)
            action_list = agent_output.action
            time_step = env.step(action_list)

        # Episode is over: step all agents with the final info state.
        with graph1.as_default():
            agents[0].step(time_step, is_evaluation=True)
        with graph2.as_default():
            agents[1].step(time_step, is_evaluation=True)
        ret.append(time_step.rewards[0])
    print(np.mean(ret))

    # Save the learner as a new version only if it beats the rival on average.
    if np.mean(ret) > 0:
        new_dir = model_root + "a2c-" + str(version + 1)
        with graph1.as_default():
            # os.makedirs is portable and race-safe, unlike shelling out to mkdir.
            os.makedirs(new_dir, exist_ok=True)
            this.save(new_dir + "/model.ckpt")

    print("iteration finished !!!!")
    print('Time elapsed:', time.time()-begin)


if __name__ == '__main__':
    # absl's app.run parses the command-line flags into FLAGS, then calls
    # main() with the remaining positional arguments.
    app.run(main)
