
import os, time
import shutil
from multiprocessing import Manager, Pool

from absl import app

from evaluator import Evaluator
from trainer import Trainer
from utils import get_logger, get_time, get_sub_paths


def gen_evaluator(checkpoint_dir, lock, current_player_id, kwargs):
    """Configure *kwargs* for evaluation and run one Evaluator to completion.

    Intended as a multiprocessing pool task: each worker receives its own
    pickled copy of the arguments.
    """
    # The checkpoint name is the last non-empty component of the path.
    components = [part for part in checkpoint_dir.split("/") if part]
    kwargs["checkpoint_dir"] = checkpoint_dir
    kwargs["checkpoint"] = components[-1]
    kwargs["lock"] = lock
    kwargs["current_player_id"] = current_player_id
    kwargs["is_extra_grad_method"] = False  # evaluation mode
    kwargs["to_decay"] = False  # evaluation mode
    kwargs["is_part_train"] = True
    kwargs["max_learn_rate"] = 1e-3

    # Debug settings: evaluation mirrors the (small) training configuration.
    if kwargs["debug"]:
        kwargs["max_eval_eps"] = 10
        kwargs["max_update_times"] = 5e1
    else:
        kwargs["max_eval_eps"] = 1000
        kwargs["max_update_times"] = 1e3

    Evaluator(**kwargs).run()


def get_args(debug=None):
    """Build the shared hyper-parameter dict used by every entry point.

    Args:
        debug: force debug mode on (True) or off (False). When None, fall
            back to the module-level ``debug`` flag set under
            ``if __name__ == '__main__'`` (defaults to False).

    Returns:
        dict of keyword arguments consumed by Trainer / Evaluator.
    """
    if debug is None:
        # BUG FIX: the original read a bare global `debug` that only exists
        # when this file runs as a script; importing the module and calling
        # get_args() raised NameError. Fall back safely instead.
        debug = bool(globals().get("debug", False))

    kwargs = dict(
        game="oshi_zumo",
        n=6,
        k=1,
        max_learn_rate=1e-3,
        min_learn_rate=1e-6,
        is_extra_grad_method=True,
        to_decay=False,
        debug=debug,
        hidden_layers_sizes=(32, 16),
        policy_type="net",  # direct parameterization vs. neural network
    )

    if debug:
        kwargs["sample_eps"] = 300
        kwargs["max_update_times"] = 10
        kwargs["learn_rate_decay_interval"] = 1
        kwargs["save_interval"] = 1
        kwargs["max_procs"] = 2
    else:
        kwargs["sample_eps"] = 300
        kwargs["max_update_times"] = 1e4
        # Learning-rate decay interval only takes effect when to_decay is True.
        kwargs["learn_rate_decay_interval"] = 1
        kwargs["save_interval"] = 1
        kwargs["max_procs"] = 10

    # Default run directory name encodes method, policy type and LR schedule.
    kwargs["train_dir"] = f"{'eg' if kwargs['is_extra_grad_method'] else 'mm'}_" \
                          f"{kwargs['policy_type']}_" \
                          f"{kwargs['max_learn_rate']}_" \
                          f"{kwargs['min_learn_rate'] if kwargs['to_decay'] else '-'}_" \
                          f"{kwargs['learn_rate_decay_interval'] if kwargs['to_decay'] else 'solid'}"
    kwargs["game_setting"] = dict(coins=6, size=1, horizon=15)

    return kwargs


def train(kwargs):
    """Run one full training session and return its checkpoint paths."""
    session = Trainer(**kwargs)
    session.run()
    return session.get_checkpoint_paths()


def multi_train(args):
    """Launch one non-decaying training run per learning rate, in parallel.

    Args:
        args: absl app positional arguments (unused).
    """
    pool = Pool(3)
    kwargs = get_args()

    kwargs["to_decay"] = False
    kwargs["is_extra_grad_method"] = True
    for lr in [1e-3, 5e-4, 1e-4]:
        kwargs["max_learn_rate"] = lr
        kwargs["train_dir"] = f"{'eg' if kwargs['is_extra_grad_method'] else 'mm'}_" \
                              f"{kwargs['max_learn_rate']}_" \
                              f"{kwargs['min_learn_rate'] if kwargs['to_decay'] else '-'}_" \
                              f"{kwargs['learn_rate_decay_interval'] if kwargs['to_decay'] else 'solid'}_" \
                              f"{kwargs['policy_type']}"
        # BUG FIX: submit a snapshot of kwargs. apply_async pickles its
        # arguments lazily from the pool's handler thread, so sharing the
        # dict let later-iteration mutations leak into earlier tasks (the
        # time.sleep(1) only masked that race).
        pool.apply_async(train, (dict(kwargs),))
        time.sleep(1)  # stagger worker start-up

    pool.close()
    pool.join()


def eval(args):
    """Evaluate saved checkpoints of the listed training runs in parallel.

    Args:
        args: absl app positional arguments (unused).

    NOTE: shadows the builtin ``eval``; the name is kept because
    ``app.run(eval)`` under ``__main__`` references it.
    """
    kwargs = get_args()
    m = Manager()
    lock = m.Lock()
    pool = Pool(15)
    train_dirs = ["eg6k1_0.0001_18111242"]

    for train_dir in train_dirs:
        kwargs["train_dir"] = train_dir
        checkpoint_dir = "train_info/" + kwargs["train_dir"] + "/checkpoints/"
        path_list = get_sub_paths(checkpoint_dir)

        # Dense sampling early (every 2nd of the first 100 checkpoints),
        # sparse afterwards (every 8th).
        path_list_eval = path_list[:100][::2] + path_list[100:][::8]
        for path in path_list_eval:
            for player_id in range(2):
                # Submit a snapshot of kwargs: apply_async pickles lazily,
                # so the shared dict would race with the mutation at the
                # top of the outer loop.
                pool.apply_async(gen_evaluator, (path, lock, player_id, dict(kwargs)))
                time.sleep(0.5)

    # BUG FIX: close()/join() were inside the for loop, so any second entry
    # in train_dirs hit an already-closed pool (ValueError). Shut the pool
    # down exactly once, after all tasks are submitted.
    pool.close()
    pool.join()


def sigle_train(args):
    """Run a single training session from scratch, wiping any previous output."""
    kwargs = get_args()
    kwargs.update(
        max_update_times=1e4,
        to_decay=False,
        max_learn_rate=0.01,
        min_learn_rate=1e-4,
        learn_rate_decay_interval=1,
        is_extra_grad_method=True,
        train_dir="debug",
    )

    # Delete leftovers of any earlier run that used the same train_dir.
    out_dir = f"train_info/{kwargs['train_dir']}/"
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)

    train(kwargs)


def main(args):
    """Train once, then evaluate every produced checkpoint for both players."""
    kwargs = get_args()
    kwargs["max_learn_rate"] = 1e-3
    kwargs["is_extra_grad_method"] = True
    kwargs["max_procs"] = 10

    # Run directory encodes method, learning-rate schedule and policy type.
    method_tag = 'eg' if kwargs['is_extra_grad_method'] else 'mm'
    min_lr_tag = kwargs['min_learn_rate'] if kwargs['to_decay'] else '-'
    decay_tag = kwargs['learn_rate_decay_interval'] if kwargs['to_decay'] else 'solid'
    kwargs["train_dir"] = (
        f"{method_tag}_{kwargs['max_learn_rate']}_{min_lr_tag}_"
        f"{decay_tag}_{kwargs['policy_type']}"
    )

    manager = Manager()
    lock = manager.Lock()
    pool = Pool(kwargs.get("max_procs"))

    checkpoint_paths = train(kwargs)

    # Fan out evaluation: one pool task per (checkpoint, player) pair.
    for path in checkpoint_paths:
        for player_id in range(2):
            pool.apply_async(gen_evaluator, (path, lock, player_id, kwargs))

    pool.close()
    pool.join()


if __name__ == '__main__':
    # Module-level debug flag read by get_args(); set True for small fast runs.
    debug = False
    # NOTE: currently runs the evaluation entry point, not main/multi_train.
    app.run(eval)
