from run import run
import time
from multiprocessing import Pool
import contextlib  # for redirecting worker stdout/stderr to log files


class Config:
    """Bare attribute container; run settings are attached dynamically
    (seed, lr, batch_size, epoch, device, dataset, model, ...)."""


# Fallback batch size used when a (dataset, model) pair has no tuned value.
BATCH_SIZE = 32
# Datasets and cognitive-diagnosis models covered by the run grid below.
DATASETS = ["JUNYI", "ASSIST2009", "ASSIST2017", "NIPS2020"]
MODELS = ["DINA", "MIRT", "NCDM", "IRT", "MCD", "KaNCD"]

# Tuned hyperparameters per dataset and model: dataset -> model -> {lr, batch_size}.
# Pairs missing from this table fall back to the module-level LR / BATCH_SIZE.
Hyperparameters = {
    "ASSIST2009": {
        # MCD
        "MCD": {
            "lr": 0.000190,
            "batch_size": 127,
        },
        # KaNCD
        "KaNCD": {
            "lr": 0.000624,
            "batch_size": 33,
        },
        # MIRT
        "MIRT": {
            "lr": 0.000089,
            "batch_size": 93,
        },
        # IRT
        "IRT": {
            "lr": 0.003063,
            "batch_size": 128,
        },
        # DINA
        "DINA": {
            "lr": 0.003464,
            "batch_size": 24,
        },
        # NCDM
        "NCDM": {
            "lr": 0.000094,
            "batch_size": 127,
        },
    },
    "JUNYI": {
        # DINA
        "DINA": {
            "lr": 0.002311,
            "batch_size": 43,
        },
        # IRT
        "IRT": {
            "lr": 0.016348,
            "batch_size": 106,
        },
        # MCD
        "MCD": {
            "lr": 0.000498,
            "batch_size": 101,
        },
        # KaNCD
        "KaNCD": {
            "lr": 0.000026,
            "batch_size": 77,
        },
        # MIRT
        "MIRT": {
            "lr": 0.000045,
            "batch_size": 20,
        },
        # NCDM
        "NCDM": {
            "lr": 0.000081,
            "batch_size": 16,
        },
    },
    "NIPS2020": {
        # NCDM
        "NCDM": {
            "lr": 0.000021,
            "batch_size": 23,
        },
        # DINA
        "DINA": {
            "lr": 0.027397,
            "batch_size": 92,
        },
        # IRT
        "IRT": {
            "lr": 0.038698,
            "batch_size": 104,
        },
        # KaNCD
        "KaNCD": {
            "lr": 0.000490,
            "batch_size": 16,
        },
        # MCD
        "MCD": {
            "lr": 0.000012,
            "batch_size": 20,
        },
        # MIRT
        "MIRT": {
            "lr": 0.000019,
            "batch_size": 16,
        },
    },
    "ASSIST2017": {
        # NCDM
        "NCDM": {
            "lr": 0.000049,
            "batch_size": 32,
        },
        # DINA
        "DINA": {
            "lr": 0.004987,
            "batch_size": 69,
        },
        # IRT
        "IRT": {
            "lr": 0.003576,
            "batch_size": 120,
        },
        # MCD
        "MCD": {
            "lr": 0.000118,
            "batch_size": 116,
        },
        # MIRT
        "MIRT": {
            "lr": 0.000029,
            "batch_size": 18,
        },
        # KaNCD
        "KaNCD": {
            "lr": 0.000028,
            "batch_size": 26,
        },
    },
}

# Fallback learning rate used when a (dataset, model) pair has no tuned value.
LR = 0.01
EPOCH = 200  # training epochs per run
# NOTE(review): DEVICE and MODEL are not referenced anywhere in this file —
# possibly read by external tooling; confirm before removing.
DEVICE = "auto"
MODEL = "all"


def create_config(seed, dataset, device, model):
    """Build a Config for one training run.

    Looks up the tuned learning rate and batch size for (dataset, model)
    in ``Hyperparameters``, falling back to the module-level defaults
    ``LR`` / ``BATCH_SIZE`` when no tuned entry exists.

    Args:
        seed: random seed recorded on the config.
        dataset: dataset name, e.g. "ASSIST2009".
        device: device string handed to the runner, e.g. "cuda:0".
        model: model name, e.g. "NCDM".

    Returns:
        Config with all run settings attached as attributes.
    """
    # Chained .get() replaces the old fallback branches, which referenced
    # non-existent Hyperparameters[dataset]["default"] / Hyperparameters["DEFAULT"]
    # entries and raised KeyError for any unknown dataset or model.
    params = Hyperparameters.get(dataset, {}).get(model, {})
    cfg = Config()
    cfg.seed = seed
    cfg.batch_size = params.get("batch_size", BATCH_SIZE)
    cfg.lr = params.get("lr", LR)
    cfg.epoch = EPOCH
    cfg.device = device
    cfg.dataset = dataset
    cfg.model = model
    cfg.less_data = False
    cfg.early_stop = True
    cfg.save = True
    return cfg


# Wrapper executed inside each worker process.
def run_wrapper(seed, dataset, device, model, stdout_file, stderr_file):
    """Build the run config and execute it, redirecting the worker's
    stdout/stderr into the given per-task log files."""
    cfg = create_config(seed, dataset, device, model)
    with open(stdout_file, "w") as out_fh, open(stderr_file, "w") as err_fh, \
            contextlib.redirect_stdout(out_fh), contextlib.redirect_stderr(err_fh):
        run(cfg)


if __name__ == "__main__":
    import os

    if not os.path.exists("output"):
        os.makedirs("output")
    DEVICES_NUM = 8
    devices = [f"cuda:{i}" for i in range(DEVICES_NUM)]
    num_processes = len(devices)

    # 生成所有任务参数：3个seed × 4个数据集
    seeds = [int(time.time()) + i for i in range(3)]
    tasks = []
    for seed in seeds:
        for i, dataset in enumerate(DATASETS):
            for model in MODELS:
                device = devices[i % len(devices)]
                # 生成唯一文件名（包含seed和dataset信息）
                stdout_file = f"output/{model}_output_{seed}_{dataset}.log"
                stderr_file = f"output/{model}_error_{seed}_{dataset}.log"
                tasks.append((seed, dataset, device, model, stdout_file, stderr_file))

    # 使用进程池并行执行
    with Pool(num_processes) as pool:
        # 注意这里改为调用包装函数
        pool.starmap_async(run_wrapper, tasks)
        pool.close()
        pool.join()

    print("All done!")
