import argparse
import time
import torch
import numpy as np
import pandas as pd
from pathlib import Path
import json
from torch.utils.data import DataLoader, TensorDataset
from model import *
from train_and_eval import train, evaluate
import os

# Logging hook; currently a plain alias for print so call sites stay stable
# if a real logger is introduced later.
info = print

# Maps the CLI-facing dataset name to its directory name under ./dataset/.
datasets = {
    "JUNYI": "junyi",
    "ASSIST2009": "a0910",
    "ASSIST2017": "assistment-2017",
    "NIPS2020": "NIPS2020",
}

# All supported cognitive-diagnosis model names (must match run()'s dispatch).
MODELS = ["DINA", "MIRT", "NCDM", "IRT", "MCD", "KaNCD"]


# CLI arguments: seed, batch_size, lr, epoch, device, dataset, model,
# plus boolean flags (--less-data / --early-stop / --save with --no-* forms).
parser = argparse.ArgumentParser()
parser.add_argument(
    "--seed",
    type=int,
    default=int(time.time()),
    help="random seed, default is current timestamp",
)
parser.add_argument(
    "--batch-size", type=int, default=32, help="batch size, default is 32"
)
parser.add_argument(
    "--lr", type=float, default=0.01, help="learning rate, default is 0.01"
)
# Fixed: help text previously claimed "default is 5" while the actual
# default has always been 200.
parser.add_argument(
    "--epoch", type=int, default=200, help="epoch number, default is 200"
)
parser.add_argument(
    "--device",
    type=str,
    default="auto",
    help="device, default is auto",
)
parser.add_argument(
    "--dataset",
    type=str,
    default="ASSIST2009",
    help="dataset name, default is ASSIST2009",
    choices=datasets.keys(),
)
parser.add_argument(
    "--model",
    type=str,
    default="all",
    help="model name, default is all",
    choices=["all"] + MODELS,
)
parser.add_argument(
    "--less-data",
    action=argparse.BooleanOptionalAction,
    default=False,
    help="use less data for debug",
)
parser.add_argument(
    "--early-stop",
    action=argparse.BooleanOptionalAction,
    help="use early stop",
    default=True,
)
parser.add_argument(
    "--save",
    action=argparse.BooleanOptionalAction,
    help="save model and result",
    default=True,
)


def run(args):
    """Train and evaluate the selected model(s) on the selected dataset.

    Expects ``dataset/<dir>/{train,valid,test,item}.csv`` plus
    ``describe.json`` to exist on disk.

    Args:
        args: parsed CLI namespace (seed, batch_size, lr, epoch, device,
            dataset, model, less_data, early_stop, save).

    Returns:
        dict keyed by "<model> on <dataset>" with the last entries of the
        metric sequences returned by ``train`` (auc/acc/rmse). Test-set
        metrics from ``evaluate`` are saved to disk or printed, not
        included in the return value.
    """
    # Map the CLI dataset name to its on-disk directory name.
    dataset = datasets[args.dataset]

    info("Setting LR: %f" % args.lr)

    # Resolve device: "auto" prefers CUDA when available; any other
    # explicit string (e.g. "cuda:1", "cpu") is passed through verbatim.
    device = "cpu"
    if args.device == "auto":
        device = "cuda" if torch.cuda.is_available() else "cpu"
    elif args.device == "cuda":
        device = "cuda"
    else:
        device = args.device

    info("Using device: %s" % device)

    # Seed every RNG used here (torch CPU, numpy, and all CUDA devices).
    def set_seed(seed):
        torch.manual_seed(seed)
        np.random.seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

    set_seed(args.seed)
    info("Seed: %d" % args.seed)

    info("Dataset: %s" % dataset)
    ## Load data
    BASE_DIR = Path(".") / "dataset" / dataset

    train_data = pd.read_csv(BASE_DIR / "train.csv")
    test_data = pd.read_csv(BASE_DIR / "test.csv")
    valid_data = pd.read_csv(BASE_DIR / "valid.csv")
    item_data = pd.read_csv(BASE_DIR / "item.csv")

    if args.less_data:
        info("Using less data")
        # Keep only the trailing 2% of each split for quick debugging.
        # NOTE(review): for splits with fewer than 50 rows,
        # int(len * 0.02) is 0 and the slice [-0:] keeps ALL rows —
        # confirm this edge case is acceptable.
        train_data = train_data.iloc[-int(len(train_data) * 0.02) :]
        test_data = test_data.iloc[-int(len(test_data) * 0.02) :]
        valid_data = valid_data.iloc[-int(len(valid_data) * 0.02) :]

    with open(BASE_DIR / "describe.json") as f:
        describe = json.load(f)

    # +1 so the 1-based ids in the CSVs can index embeddings directly —
    # presumably index 0 stays unused; verify against the data files.
    user_num = describe["user_num"] + 1
    item_num = describe["item_num"] + 1
    knowledge_num = describe["knowledge_num"] + 1

    # Convert score column to int (assumed to be 0/1 correctness labels).
    train_data["score"] = train_data["score"].apply(int)
    test_data["score"] = test_data["score"].apply(int)
    valid_data["score"] = valid_data["score"].apply(int)

    # Knowledge-aware models need a multi-hot knowledge vector per item;
    # MIRT/IRT/MCD consume only (user, item, score) and skip this branch.
    if args.model not in ["MIRT", "IRT", "MCD"]:

        def onehot_knowledge(knowledge_code):
            # Parse a knowledge_code string of the form "[1,2,3]" into a
            # multi-hot list of length knowledge_num (ids are 1-based).
            t = knowledge_code[1:-1].split(",")
            res = [0] * knowledge_num
            for ti in t:
                # Debug trace for out-of-range ids; NOTE(review): the
                # assignment below still runs and would raise IndexError
                # for ids > knowledge_num — confirm intent.
                if int(ti) - 1 >= knowledge_num:
                    print(ti, knowledge_num)
                res[int(ti) - 1] = 1
            return res

        item_data["knowledge"] = item_data["knowledge_code"].apply(onehot_knowledge)
        item_data = item_data.drop(columns=["knowledge_code"])

        # Join each interaction row with its item's knowledge vector.
        dina_train_data = pd.merge(train_data, item_data, on="item_id")
        dina_test_data = pd.merge(test_data, item_data, on="item_id")
        dina_valid_data = pd.merge(valid_data, item_data, on="item_id")
        info(
            "Dataset Length: train %d, test %d, valid %d"
            % (len(dina_train_data), len(dina_test_data), len(dina_valid_data))
        )
    else:
        info(
            "Dataset Length: train %d, test %d, valid %d"
            % (len(train_data), len(test_data), len(valid_data))
        )

    batch_size = args.batch_size
    info("Batch size: %d" % batch_size)

    def transform(x, y, k, z, batch_size, **params):
        """Build a DataLoader over (user, item[, knowledge], score).

        x/y are user/item id columns (cast to int64), k is the optional
        multi-hot knowledge column (float32; omitted when None), z is the
        score column (float32). Extra **params pass through to DataLoader.
        """
        x = np.array(x)
        y = np.array(y)
        z = np.array(z)
        if k is not None:
            dataset = TensorDataset(
                torch.tensor(x, dtype=torch.int64),
                torch.tensor(y, dtype=torch.int64),
                torch.tensor(k, dtype=torch.float32),
                torch.tensor(z, dtype=torch.float32),
            )
        else:
            dataset = TensorDataset(
                torch.tensor(x, dtype=torch.int64),
                torch.tensor(y, dtype=torch.int64),
                torch.tensor(z, dtype=torch.float32),
            )
        return DataLoader(dataset, batch_size=batch_size, **params)

    if args.model == "all":
        model_names = MODELS
    else:
        model_names = [args.model]

    info("Preparing data")
    # loaders[name] == [train_loader, valid_loader, test_loader]
    loaders = {}
    for name in model_names:
        if name in ["MIRT", "IRT", "MCD"]:
            loaders[name] = [
                transform(
                    data["user_id"],
                    data["item_id"],
                    None,
                    data["score"],
                    batch_size=batch_size,
                )
                for data in [train_data, valid_data, test_data]
            ]
        else:
            loaders[name] = [
                transform(
                    data["user_id"],
                    data["item_id"],
                    data["knowledge"],
                    data["score"],
                    batch_size=batch_size,
                )
                for data in [dina_train_data, dina_valid_data, dina_test_data]
            ]

    info("Data loaded")

    # Instantiate each requested model; the classes come from the star
    # import of `model` and have differing constructor signatures.
    models = {}
    for name in model_names:
        if name == "DINA":
            models[name] = DINA(user_num, item_num, knowledge_num)
        elif name == "MIRT":
            models[name] = MIRT(user_num, item_num, knowledge_num, None)
        elif name == "NCDM":
            models[name] = NCDM(user_num, item_num, knowledge_num)
        elif name == "IRT":
            models[name] = IRT(user_num, item_num, 1, 1)
        elif name == "MCD":
            models[name] = MCD(user_num, item_num, knowledge_num)
        elif name == "KaNCD":
            models[name] = KaNCD(user_num, item_num, knowledge_num, 20, "gmf")
        else:
            raise ValueError("Unknown model %s" % name)

        models[name] = models[name].to(device)

    info("Model loaded")

    def create_dir(path):
        # Recursively create the directory (and parents) if it does not
        # already exist.
        path = Path(path)
        if not path.exists():
            os.makedirs(path)

    results = {}

    for name in model_names:
        # train() returns metric sequences (presumably per-epoch history
        # on the validation loader — confirm in train_and_eval); only the
        # final entries are recorded below.
        aucs, accs, rmses = train(
            models[name],
            loaders[name][0],
            loaders[name][1],
            epochs=args.epoch,
            lr=args.lr,
            name=name,
            device=device,
            early_stop=args.early_stop,
        )

        results[f"{name} on {args.dataset}"] = {
            "auc": aucs[-1],
            "acc": accs[-1],
            "rmse": rmses[-1],
        }
        # Final held-out evaluation on the test loader.
        auc, acc, rmse = evaluate(
            models[name], loaders[name][2], name=name, device=device
        )
        if args.save:
            # Persist test metrics and model weights, keyed by seed so
            # repeated runs don't overwrite each other.
            create_dir(f"results/{dataset}/{name}")
            with open(f"results/{dataset}/{name}/result_{args.seed}.json", "w") as f:
                json.dump({"auc": auc, "acc": acc, "rmse": rmse}, f)
            torch.save(
                models[name].state_dict(),
                f"results/{dataset}/{name}/model_{args.seed}.pth",
            )
        else:
            info(f"{name} on {args.dataset}: auc {auc}, acc {acc}, rmse {rmse}")
    return results


if __name__ == "__main__":
    # Script entry point: parse the CLI and hand off to run().
    run(parser.parse_args())
