import itertools
import multiprocessing as mp
import os
import subprocess


def run_commands(arg_list: list) -> int:
    """Join *arg_list* into one command line and run it through the shell.

    Elements may themselves contain spaces (e.g. "--lr 0.1"), so the
    pieces are joined into a single string and executed with shell=True
    rather than passed as an argv list.

    NOTE(review): shell=True is acceptable only because every argument
    is built from trusted literals inside this script — never feed
    untrusted input through here.

    Args:
        arg_list: command fragments to be space-joined.

    Returns:
        The command's exit status (previously discarded; existing
        callers ignore the return value, so this is backward-compatible).
    """
    return subprocess.call(" ".join(arg_list), shell=True)


def clean():
    """Wipe every entry under ./train_model/ (best effort, no error check).

    NOTE(review): run_gnn() writes its results under ./trained_model/ —
    confirm whether "train_model" here is a typo for that directory.
    """
    subprocess.call("rm -rf ./train_model/*", shell=True)


def run_baseline(gpu_id: int = 2, n_workers: int = 12):
    """Grid-search the KG-free baseline over embed_dim x lr.

    Launches one `main_KG_free.py` process per (embed_dim, lr)
    combination through a worker pool and blocks until all finish.

    Args:
        gpu_id: CUDA device id passed to every run (default 2 keeps the
            previously hard-coded value).
        n_workers: maximum concurrent runs (default 12, as before).

    Returns:
        0, kept for backward compatibility with existing callers.
    """
    base_command = f"python main_KG_free.py --cuda --gpu_id {gpu_id} --print_every 30"
    embed_dim_list = [8, 16, 32, 64]
    lr_list = [0.1, 0.01, 0.001]

    pool = mp.Pool(n_workers)
    # Same iteration order as the original nested loops: dim outer, lr inner.
    for dim, lr in itertools.product(embed_dim_list, lr_list):
        arg_list = [base_command, f"--embed_dim {dim}", f"--lr {lr}"]
        # Fire-and-forget: failures inside run_commands are not surfaced.
        pool.apply_async(run_commands, (arg_list,))
    pool.close()
    pool.join()
    return 0


def run_embedding(gpu_id: int = 2, n_workers: int = 12):
    """Grid-search the embedding-based model over KG type x lr x embed_dim.

    Launches one `main_Embedding_based.py` process per combination
    through a worker pool and blocks until all finish. relation_dim is
    always tied to embed_dim, as in the original sweep.

    Args:
        gpu_id: CUDA device id passed to every run (default 2 keeps the
            previously hard-coded value).
        n_workers: maximum concurrent runs (default 12, as before).
    """
    base_command = f"python main_Embedding_based.py --cuda --gpu_id {gpu_id} --print_every 30"
    embed_dim_list = [8, 16, 32, 64]
    embedding_type_list = ["TransE", "TransR"]
    lr_list = [0.1, 0.01, 0.001]

    pool = mp.Pool(n_workers)
    # Same iteration order as the original nested loops:
    # em_type outermost, then lr, then dim.
    for em_type, lr, dim in itertools.product(
        embedding_type_list, lr_list, embed_dim_list
    ):
        arg_list = [
            base_command,
            f"--embed_dim {dim}",
            f"--relation_dim {dim}",
            f"--KG_embedding_type {em_type}",
            f"--lr {lr}",
        ]
        # Fire-and-forget: failures inside run_commands are not surfaced.
        pool.apply_async(run_commands, (arg_list,))
    pool.close()
    pool.join()

def run_gnn(gpu_id: int = 2, n_workers: int = 2):
    """Grid-search the GNN-based model, skipping already-finished runs.

    A combination counts as finished when its marker file
    ./trained_model/Douban/GNN_based/<run-name>/metrics0.tsv exists;
    only unfinished combinations are submitted to the pool.

    Args:
        gpu_id: CUDA device id passed to every run (default 2 keeps the
            previously hard-coded value).
        n_workers: maximum concurrent runs (default 2, as before).
    """
    base_command = f"python main_GNN_based.py --cuda --gpu_id {gpu_id} --print_every 30"
    embed_dim_list = [16, 32, 64]
    embedding_type_list = ["TransE", "TransR"]
    lr_list = [0.1, 0.01, 0.001]
    laplacian_type_list = ["symmetric", "random-walk"]
    aggregation_type_list = ["gcn", "graphsage", "lightgcn"]
    layers_list = [2, 4, 8]
    base_pth = "./trained_model/Douban/GNN_based"

    pool = mp.Pool(processes=n_workers)
    # Same iteration order as the original six nested loops.
    for lr, em_type, lap_type, agg_type, nlayers, dim in itertools.product(
        lr_list,
        embedding_type_list,
        laplacian_type_list,
        aggregation_type_list,
        layers_list,
        embed_dim_list,
    ):
        arg_list = [
            base_command,
            f"--embed_dim {dim}",
            f"--relation_dim {dim}",
            f"--laplacian_type {lap_type}",
            f"--KG_embedding_type {em_type}",
            f"--aggregation_type {agg_type}",
            f"--lr {lr}",
            f"--n_layers {nlayers}",
        ]
        marker = os.path.join(
            base_pth,
            f"nlayer{nlayers}_dim{dim}_lr{lr}_{lap_type}_{em_type}_{agg_type}",
            "metrics0.tsv",
        )
        # The original probed the marker with a bare open()/except, which
        # leaked the file handle and swallowed every exception type
        # (including KeyboardInterrupt). An existence check expresses the
        # same "skip finished runs" intent safely.
        if not os.path.exists(marker):
            pool.apply_async(run_commands, (arg_list,))
    pool.close()
    pool.join()


def run_all():
    """Run all three hyper-parameter sweeps concurrently, one worker each."""
    pool = mp.Pool(processes=3)
    for sweep in (run_baseline, run_embedding, run_gnn):
        pool.apply_async(sweep)
    pool.close()
    pool.join()


if __name__ == "__main__":
    # Entry point: only the GNN sweep is currently enabled; re-enable the
    # other sweeps by uncommenting the calls below.
    # run_baseline()
    # run_embedding()
    run_gnn()
