import argparse
import sys

# argv = sys.argv
# The dataset name is obtained from the command-line invocation
# dataset = argv[1]
# print(f"Dataset: {dataset}")
# For now, do not default the dataset to ACM
# dataset = "ACM"

# In this script, parse_arguments() parses the command-line arguments and extracts the dataset name.
# set_params(dataset) then calls the parameter-setting function matching that dataset name.
# Finally, the logic under __main__ sets the parameters for the chosen dataset and prints them.

def parse_args(args=None):
    """Parse command-line arguments with the generic (dataset-agnostic) defaults.

    Args:
        args: Optional list of argument strings. ``None`` makes argparse
            fall back to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace holding all parsed options.
    """
    parser = argparse.ArgumentParser(description='SeHGNN')

    ## For environment construction
    parser.add_argument('--seeds', nargs='+', type=int, default=[1],
                        help='the seed used in the training')
    parser.add_argument('--dataset', type=str, default='DBLP',
                        choices=['DBLP', 'ACM', 'IMDB', 'Freebase'])
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--cpu', action='store_true', default=False)
    parser.add_argument('--root', type=str, default='./data/')
    parser.add_argument('--epoch', type=int, default=50, help='Maximum number of epochs.')
    parser.add_argument('--embed-size', type=int, default=256,
                        help='initial embedding size of nodes with no attributes')
    # NOTE: the two help strings below were previously swapped relative to the
    # flag names; --num-hops governs raw-feature propagation and
    # --num-label-hops governs label propagation.
    parser.add_argument('--num-hops', type=int, default=2,
                        help='number of hops for propagation of raw features')
    parser.add_argument('--label-feats', action='store_true', default=False,
                        help='whether to use the label propagated features')
    parser.add_argument('--num-label-hops', type=int, default=2,
                        help='number of hops for propagation of raw labels')
    ## For network structure
    parser.add_argument('--n-fp-layers', type=int, default=2,
                        help='the number of mlp layers for feature projection')
    parser.add_argument('--n-task-layers', type=int, default=3,
                        help='the number of mlp layers for the downstream task')
    parser.add_argument('--hidden', type=int, default=512)
    parser.add_argument('--dropout', type=float, default=0.5,
                        help='dropout on activation')
    parser.add_argument('--input-drop', type=float, default=0.1,
                        help='input dropout of input features')
    parser.add_argument('--att-drop', type=float, default=0.,
                        help='attention dropout of model')
    parser.add_argument('--act', type=str, default='none',
                        choices=['none', 'relu', 'leaky_relu', 'sigmoid'],
                        help='the activation function of the transformer part')
    parser.add_argument('--residual', action='store_true', default=False,
                        help='whether to add a residual branch for the raw input features')
    ## For training
    parser.add_argument('--amp', action='store_true', default=False,
                        help='whether to use amp to accelerate training with float16 (half) calculation')
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--weight-decay', type=float, default=0)
    parser.add_argument('--batch-size', type=int, default=10000)
    parser.add_argument('--patience', type=int, default=50,
                        help='early stop patience')

    return parser.parse_args(args)


def dblp_params(args=None):
    """Parse command-line arguments using the DBLP-tuned defaults.

    Differs from ``parse_args`` in: seeds [1..5], 200 epochs, embed size 512,
    4 label hops, input dropout 0.5, and label-feats / residual / amp on.

    Args:
        args: Optional list of argument strings. ``None`` makes argparse
            fall back to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace holding all parsed options.
    """
    parser = argparse.ArgumentParser(description='SeHGNN')

    ## For environment construction
    parser.add_argument('--seeds', nargs='+', type=int, default=[1, 2, 3, 4, 5],
                        help='the seed used in the training')
    parser.add_argument('--dataset', type=str, default='DBLP',
                        choices=['DBLP', 'ACM', 'IMDB', 'Freebase'])
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--cpu', action='store_true', default=False)
    parser.add_argument('--root', type=str, default='./data/')
    parser.add_argument('--epoch', type=int, default=200, help='Maximum number of epochs.')
    parser.add_argument('--embed-size', type=int, default=512,
                        help='initial embedding size of nodes with no attributes')
    # NOTE: the two help strings below were previously swapped relative to the
    # flag names; --num-hops governs raw-feature propagation and
    # --num-label-hops governs label propagation.
    parser.add_argument('--num-hops', type=int, default=2,
                        help='number of hops for propagation of raw features')
    # NOTE(review): store_true with default=True means this flag is effectively
    # always on and cannot be disabled from the CLI — confirm this is intended.
    parser.add_argument('--label-feats', action='store_true', default=True,
                        help='whether to use the label propagated features')
    parser.add_argument('--num-label-hops', type=int, default=4,
                        help='number of hops for propagation of raw labels')
    ## For network structure
    parser.add_argument('--n-fp-layers', type=int, default=2,
                        help='the number of mlp layers for feature projection')
    parser.add_argument('--n-task-layers', type=int, default=3,
                        help='the number of mlp layers for the downstream task')
    parser.add_argument('--hidden', type=int, default=512)
    parser.add_argument('--dropout', type=float, default=0.5,
                        help='dropout on activation')
    parser.add_argument('--input-drop', type=float, default=0.5,
                        help='input dropout of input features')
    parser.add_argument('--att-drop', type=float, default=0.,
                        help='attention dropout of model')
    parser.add_argument('--act', type=str, default='none',
                        choices=['none', 'relu', 'leaky_relu', 'sigmoid'],
                        help='the activation function of the transformer part')
    # NOTE(review): store_true with default=True — always on (see --label-feats).
    parser.add_argument('--residual', action='store_true', default=True,
                        help='whether to add a residual branch for the raw input features')
    ## For training
    # NOTE(review): store_true with default=True — always on (see --label-feats).
    parser.add_argument('--amp', action='store_true', default=True,
                        help='whether to use amp to accelerate training with float16 (half) calculation')
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--weight-decay', type=float, default=0)
    parser.add_argument('--batch-size', type=int, default=10000)
    parser.add_argument('--patience', type=int, default=50,
                        help='early stop patience')

    return parser.parse_args(args)


def init_params(parser):
    """Register the generic SeHGNN options on an existing parser.

    Mutates ``parser`` in place with the same defaults as ``parse_args``.

    Args:
        parser: An ``argparse.ArgumentParser`` to populate.

    Returns:
        The same parser, to allow chaining (callers that ignore the return
        value are unaffected).
    """
    ## For environment construction
    parser.add_argument('--seeds', nargs='+', type=int, default=[1],
                        help='the seed used in the training')
    parser.add_argument('--dataset', type=str, default='DBLP',
                        choices=['DBLP', 'ACM', 'IMDB', 'Freebase'])
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--cpu', action='store_true', default=False)
    parser.add_argument('--root', type=str, default='./data/')
    parser.add_argument('--epoch', type=int, default=50, help='Maximum number of epochs.')
    parser.add_argument('--embed-size', type=int, default=256,
                        help='initial embedding size of nodes with no attributes')
    # NOTE: the two help strings below were previously swapped relative to the
    # flag names; --num-hops governs raw-feature propagation and
    # --num-label-hops governs label propagation.
    parser.add_argument('--num-hops', type=int, default=2,
                        help='number of hops for propagation of raw features')
    parser.add_argument('--label-feats', action='store_true', default=False,
                        help='whether to use the label propagated features')
    parser.add_argument('--num-label-hops', type=int, default=2,
                        help='number of hops for propagation of raw labels')
    ## For network structure
    parser.add_argument('--n-fp-layers', type=int, default=2,
                        help='the number of mlp layers for feature projection')
    parser.add_argument('--n-task-layers', type=int, default=3,
                        help='the number of mlp layers for the downstream task')
    parser.add_argument('--hidden', type=int, default=512)
    parser.add_argument('--dropout', type=float, default=0.5,
                        help='dropout on activation')
    parser.add_argument('--input-drop', type=float, default=0.1,
                        help='input dropout of input features')
    parser.add_argument('--att-drop', type=float, default=0.,
                        help='attention dropout of model')
    parser.add_argument('--act', type=str, default='none',
                        choices=['none', 'relu', 'leaky_relu', 'sigmoid'],
                        help='the activation function of the transformer part')
    parser.add_argument('--residual', action='store_true', default=False,
                        help='whether to add a residual branch for the raw input features')
    ## For training
    parser.add_argument('--amp', action='store_true', default=False,
                        help='whether to use amp to accelerate training with float16 (half) calculation')
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--weight-decay', type=float, default=0)
    parser.add_argument('--batch-size', type=int, default=10000)
    parser.add_argument('--patience', type=int, default=50,
                        help='early stop patience')

    return parser