'''
Created on Mar 1, 2020
Pytorch Implementation of LightGCN in
Xiangnan He et al. LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation

@author: Jianbai Ye (gusye@mail.ustc.edu.cn)
'''
import argparse


def parse_args(argv=None):
    """Parse command-line options for LightGCN training/evaluation.

    Args:
        argv: Optional list of argument strings (e.g. for testing). When
            ``None``, argparse falls back to ``sys.argv[1:]`` — so existing
            callers of ``parse_args()`` are unaffected.

    Returns:
        argparse.Namespace holding every option below.
    """
    parser = argparse.ArgumentParser(description="Go lightGCN")

    # overall control
    parser.add_argument('--dataset', type=str, default='gowalla',
                        help="available datasets: [lastfm, gowalla, yelp2018, amazon-book]")
    parser.add_argument('--model', type=str, default='lgn_hash', help='rec-model, support [mf, lgn, lgn_hash]')
    parser.add_argument('--loss', type=str, default='bpr',
                        help='choose loss, support [bce, bpr, sauc_for_sample, sauc_for_user]')

    # model
    parser.add_argument('--recdim', type=int, default=64,
                        help="the embedding size of lightGCN")
    parser.add_argument('--layer', type=int, default=3,
                        help="the layer num of lightGCN")
    # int flag (0/1) rather than store_true, matching the file's convention
    parser.add_argument('--dropout', type=int, default=0,
                        help="using the dropout or not")
    # help text fixed: previously duplicated the batch-size description
    parser.add_argument('--keepprob', type=float, default=0.6,
                        help="the probability of keeping an edge when dropout is enabled")
    parser.add_argument('--a_fold', type=int, default=100,
                        help="the fold num used to split large adj matrix, like gowalla")

    # train
    parser.add_argument('--pretrain', type=int, default=0, help='whether we use pretrained weight or not')
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--batch_size', type=int, default=2 ** 18,
                        help="the batch size for bpr loss training procedure")
    parser.add_argument('--lr', type=float, default=0.0001,
                        help="the learning rate")
    parser.add_argument('--is_use_early_stop', type=int, default=1, help="whether early stop")

    # loss
    parser.add_argument('--weight_decay', type=float, default=1e-3,
                        help="the weight decay for l2 normalization")
    parser.add_argument('--tau', type=float, default=0.02, help="parameter tau")

    # test
    parser.add_argument('--testbatch', type=int, default=100,
                        help="the batch size of users for testing")
    # NOTE(review): the caller evaluates this string (see the reference block
    # below); a list literal is expected here.
    parser.add_argument('--topks', nargs='?', default="[5, 10, 20, 50, 100, 200]",
                        help="@k test list")
    parser.add_argument('--multicore', type=int, default=1, help='whether we use multiprocessing or not in test')

    # other
    parser.add_argument('--tensorboard', type=int, default=1,
                        help="enable tensorboard")
    parser.add_argument('--comment', type=str, default="lgn")
    parser.add_argument('--load', type=int, default=0)
    parser.add_argument('--seed', type=int, default=2020, help='random seed')
    parser.add_argument('--gpu', type=str, default='2', help='gpu number')
    parser.add_argument('--path', type=str, default="./checkpoints",
                        help="path to save weights")

    return parser.parse_args(argv)

'''
NOTE (review): dead reference snippet — shows how the main script is expected
to map the parsed args into its `config` dict. Kept for documentation only.
Caution: `eval(args.topks)` executes arbitrary code from the command line;
prefer `ast.literal_eval` in the real caller.
config["dataset"] = args.dataset
config["model"] = args.model
config["loss"] = args.loss

config["register"].check_dataset(config["dataset"])
config["register"].check_model(config["model"])


# model
config['latent_dim_rec'] = args.recdim
config['lightGCN_n_layers'] = args.layer
config['dropout'] = args.dropout
config['keep_prob'] = args.keepprob
config['A_n_fold'] = args.a_fold
config["tanh_beta"] = 100

# train
config['pretrain'] = args.pretrain
config["TRAIN_epochs"] = args.epochs
config['batch_size'] = args.batch_size
config["is_use_early_stop"] = args.is_use_early_stop
config['lr'] = args.lr

# loss
config["tau"] = args.tau
config["weight_decay"] = args.weight_decay

# test
config['test_u_batch_size'] = args.testbatch
config["topks"] = eval(args.topks)
config['multicore'] = args.multicore

# other
config['A_split'] = False
config['bigdata'] = False
config["gpu"] = args.gpu
config["seed"] = args.seed
config["LOAD"] = args.load
config["PATH"] = args.path
config["data_path"] = DATA_PATH
'''