"""
    check_dataset
    check_sampler
    check_model
    check_loss

    Procedure.TRAIN(dataset, model, loss)
    model.load_static_dict(file)
    Procedure.TEST(dataset, model, )

"""
# import sys
# sys.path.append("..")
from datetime import datetime
from utils import logCof, set_seed
from deepctr_torch.inputs import SparseFeat, DenseFeat
from deepctr_torch.models import DeepFM

from pprint import pprint
from dataloader import check_dataset, get_dataset
# from sampler import check_sampler, get_sampler
from Procedure import Procedure
from torch.optim import Adam
from loss import getLoss
from config import config_print
import torch


def main(config, logger):
    """Build dataset, DeepFM model, loss and optimizer from *config*, then train and test.

    Args:
        config: dict of hyper-parameters and paths. Mutated in place: the
            resolved compute device is written back to ``config["device"]``.
        logger: optional logger instance; currently unused by this function.
    """
    # Resolve the compute device once and store it back into the config so all
    # downstream components (dataset, model, Procedure) agree on it.
    config["device"] = f"cuda:{config['cuda']}" if torch.cuda.is_available() else "cpu"
    config_print(config)

    set_seed(config["seed"])
    print(">>SEED:", config["seed"])

    # Verify the requested modules exist before doing any heavy work.
    check_dataset(config["dataset_name"])
    # check_sampler(config["sampler"])
    # check_model(config["model"])

    dataset = get_dataset(config["dataset_path"], config["dataset_name"],
                          config["sampler"], config["train_way"], config["device"])

    # Vocabulary size for every sparse feature: {feature_name: cardinality}.
    if dataset.has_sideinfo:
        # FIX: index the DataFrames with the column label itself, not
        # str(name) — str() would raise KeyError for non-string labels
        # (and is redundant when the labels already are strings).
        sparse_features = {name: dataset.users_info[name].nunique()
                           for name in dataset.users_info.columns}
        sparse_features.update({name: dataset.items_info[name].nunique()
                                for name in dataset.items_info.columns})
    else:
        sparse_features = {"userInt": dataset.users_num, "newsInt": dataset.items_num}
    print("sparse_features\n", sparse_features)
    dense_features = []  # no dense features used at the moment
    fixlen_feature_columns = (
        [SparseFeat(feat, sparse_features[feat], embedding_dim=config["embedding_dim"])
         for feat in sparse_features]
        + [DenseFeat(feat, 1) for feat in dense_features]
    )
    # NOTE(review): the stock deepctr_torch DeepFM constructor does not accept
    # an `lr` keyword (the learning rate normally goes to the optimizer or
    # model.compile); confirm the local DeepFM variant really takes it,
    # otherwise this call raises TypeError.
    model = DeepFM(linear_feature_columns=fixlen_feature_columns,
                   dnn_feature_columns=fixlen_feature_columns,
                   lr=config["lr"],
                   l2_reg_embedding=config["l2_reg_embedding"],
                   l2_reg_dnn=config["l2_reg_dnn"],
                   device=config["device"],
                   dnn_hidden_units=(config["layer_size1"], config["layer_size2"]),
                   dnn_dropout=config["dropout"],
                   dnn_use_bn=True)
    model.to(config["device"])
    loss_fun = getLoss(config["loss"], model, config, dataset)
    opt = Adam(model.parameters(), lr=config["lr"])

    procedure = Procedure(dataset, model, loss_fun, opt, config)
    procedure.fit(config["epoch_num"], config["batch_size"])
    procedure.test()




if __name__ == "__main__":
    from config import config
    import logging
    current_time = datetime.now().strftime('%Y%m%d%H%M%S')
    # logger = logging.getLogger(config["project_name"])
    # log_file_name = config["project_name"] + "_" + current_time + ".log"

    # logger = logCof(logger, "../log/", log_file_name)
    # logger.info(config)
    main(config, None)
