import os
import time
from multiprocessing import Manager, Pool, Process

from absl import app

from evaluator import Evaluator

from trainer import Trainer
from utils import get_time, get_checkpoints_path


def train(kwargs):
    """Run one full training job and return the checkpoint paths it produced."""
    job = Trainer(**kwargs)
    job.run()
    return job.get_checkpoint_paths()


def evaluate(kwargs):
    """Run a single evaluation job described by *kwargs*."""
    Evaluator(**kwargs).run()


def get_train_args(debug=None):
    """Build the default keyword arguments for training.

    Args:
        debug: Optional override for debug mode. When None (the default,
            preserving the original call signature), fall back to the
            module-level ``debug`` flag set in the ``__main__`` guard; if
            that global is absent (e.g. this module is imported), default
            to False instead of raising NameError.

    Returns:
        dict: keyword arguments consumed by ``Trainer(**kwargs)``, including
        a ``train_dir`` name derived from the hyper-parameters.
    """
    if debug is None:
        # The script assigns ``debug`` only inside the __main__ guard, so a
        # guarded lookup keeps this module importable on its own.
        debug = bool(globals().get("debug", False))

    kwargs = dict(
        game="oshi_zumo",
        game_setting=dict(coins=6, size=1),
        n=6,
        k=1,
        max_learn_rate=0.1,
        min_learn_rate=1e-3,
        is_extra_grad_method=True,
        to_decay=True,
        debug=debug,
        policy_type="direct",  # direct parameterization vs. neural network
    )

    if debug:
        # Tiny settings for quick smoke runs.
        kwargs["learn_eps"] = 3
        kwargs["max_update_times"] = 10
        kwargs["learn_rate_decay_interval"] = 4
        kwargs["save_interval"] = 5
        kwargs["max_procs"] = 2
    else:
        kwargs["learn_eps"] = 300
        kwargs["max_update_times"] = 5e4
        kwargs["learn_rate_decay_interval"] = 5e3  # decay interval; only used when to_decay is enabled
        kwargs["save_interval"] = 20
        kwargs["max_procs"] = 10

    # Encode the hyper-parameters in the run-directory name,
    # e.g. "eg_0.1_0.001_5000.0_direct".
    kwargs["train_dir"] = f"{'eg' if kwargs['is_extra_grad_method'] else 'mm'}_" \
                          f"{kwargs['max_learn_rate']}_" \
                          f"{kwargs['min_learn_rate'] if kwargs['to_decay'] else '-'}_" \
                          f"{kwargs['learn_rate_decay_interval'] if kwargs['to_decay'] else 'solid'}_" \
                          f"{kwargs['policy_type']}"

    return kwargs


def set_eval_args(checkpoint_dir, lock, current_player_id, kwargs):
    """Fill *kwargs* in place with the default evaluation settings and return it."""
    checkpoint_name = os.path.basename(checkpoint_dir).split(".")[0]

    kwargs["checkpoint_dir"] = checkpoint_dir
    kwargs["checkpoint"] = checkpoint_name
    kwargs["lock"] = lock
    kwargs["current_player_id"] = current_player_id
    kwargs["is_extra_grad_method"] = False  # evaluation mode
    kwargs["to_decay"] = False  # evaluation mode
    kwargs["max_learn_rate"] = 0.5

    # Debug settings mirror the training side.
    if kwargs["debug"]:
        eval_eps, update_times = 10, 5e1
    else:
        eval_eps, update_times = 1000, 2e4

    kwargs["max_eval_eps"] = eval_eps
    kwargs["max_update_times"] = update_times

    return kwargs


def main(args):
    """Launch training runs No11 and No12 as independent child processes.

    Fix: each child now receives its own snapshot (``dict(kwargs)``) of the
    argument dict. Previously the same mutable dict was handed to every
    Process while the loop kept mutating it, which is safe only by accident
    of when the start method pickles/forks the arguments.
    """
    kwargs = get_train_args()
    processes = []

    for i in range(11, 13):
        print("train: ", i)
        kwargs["max_learn_rate"] = 0.9
        kwargs["to_decay"] = False
        kwargs["is_extra_grad_method"] = True
        # Run-directory name encodes the hyper-parameters plus the run index.
        kwargs["train_dir"] = f"{'eg' if kwargs['is_extra_grad_method'] else 'mm'}_" \
                              f"{kwargs['max_learn_rate']}_" \
                              f"{kwargs['min_learn_rate'] if kwargs['to_decay'] else '-'}_" \
                              f"{kwargs['learn_rate_decay_interval'] if kwargs['to_decay'] else 'solid'}_" \
                              f"{kwargs['policy_type']}_" \
                              f"No{i}"

        p = Process(target=train, args=(dict(kwargs),))
        p.start()
        processes.append(p)
        time.sleep(1)  # stagger launches

    for proc in processes:
        proc.join()


def multi_train(args):
    """Run fixed-learning-rate extra-gradient training jobs on a small pool.

    Fix: ``Pool.apply_async`` pickles its arguments lazily in the pool's
    task-handler thread, so submitting the shared ``kwargs`` dict and then
    mutating it for the next learning rate raced with the submission (the
    ``sleep(1)`` only made the race unlikely). Each task now gets its own
    snapshot of the argument dict.
    """
    pool = Pool(2)

    kwargs = get_train_args()
    ############################################  eg, fixed learning rate
    for lr in [0.4, 0.6]:
        kwargs["max_learn_rate"] = lr
        kwargs["max_update_times"] = 5e5
        kwargs["save_interval"] = 500
        kwargs["to_decay"] = False
        kwargs["is_extra_grad_method"] = True
        kwargs["train_dir"] = f"{'eg' if kwargs['is_extra_grad_method'] else 'mm'}_" \
                              f"{kwargs['max_learn_rate']}_" \
                              f"{kwargs['min_learn_rate'] if kwargs['to_decay'] else '-'}_" \
                              f"{kwargs['learn_rate_decay_interval'] if kwargs['to_decay'] else 'solid'}_" \
                              f"{kwargs['policy_type']}"

        # Snapshot so later in-loop mutations cannot leak into this task.
        pool.apply_async(train, (dict(kwargs),))
        time.sleep(1)  # stagger launches
    pool.close()
    pool.join()


def multi_eval(args):
    """Evaluate a sample of saved checkpoints for each listed run directory.

    Fix: the same ``kwargs`` dict was mutated by ``set_eval_args`` and
    re-submitted to ``apply_async`` for every (checkpoint, player) pair;
    because the pool pickles task arguments lazily, workers could observe
    later mutations. Each task now receives its own copy.
    """
    kwargs = get_train_args()

    for train_dir in ["eg_1_-_solid_direct"]:

        pool = Pool(10)
        m = Manager()
        # Manager proxy lock: picklable, so it can be shared with pool workers.
        lock = m.Lock()

        kwargs["train_dir"] = train_dir  # key: this determines where results are saved
        checkpoint_dir = "train_info/" + kwargs["train_dir"] + "/checkpoints/"
        path_list = get_checkpoints_path(checkpoint_dir)

        # Every 25th checkpoint (starting at index 24).
        paths = path_list[24::25]

        for path in paths:  # per 500, 100 points
            for player_id in range(2):
                # Dynamic evaluation parameters are set here, on a private
                # snapshot of the training arguments.
                task_kwargs = set_eval_args(path, lock, player_id, dict(kwargs))
                pool.apply_async(evaluate, (task_kwargs,))
                time.sleep(0.5)  # stagger launches

        pool.close()
        pool.join()
        time.sleep(1)


if __name__ == '__main__':
    # Module-level flag read inside get_train_args(); set True for quick
    # smoke runs with tiny episode/update counts.
    debug = False

    # absl entry point: parses flags and calls main(args).
    app.run(main)
