import argparse
import torch
import numpy as np
import random


def get_parser_args():
    """Build training settings, seed all RNGs, and return the parsed args.

    Returns:
        argparse.Namespace with the hyperparameters below, plus a derived
        boolean ``cuda`` flag (True iff CUDA is available and not disabled).

    Note:
        ``parse_args(args=[])`` deliberately ignores ``sys.argv`` so the
        defaults are always used — this keeps the function safe to call
        from notebooks or test runners that have their own CLI flags.
    """
    parser = argparse.ArgumentParser()
    # Disable CUDA training.
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='Disables CUDA training.')
    # Validate during the training pass.
    parser.add_argument('--fastmode', action='store_true', default=False,
                        help='Validate during training pass.')
    # Use the sparse GAT variant.
    parser.add_argument('--sparse', action='store_true', default=False,
                        help='GAT with sparse version or not.')
    # Random seed (so results are reproducible).
    parser.add_argument('--seed', type=int, default=72, help='Random seed.')
    # Number of epochs to train.
    # NOTE(review): default was dropped from 10000 to 2 at some point,
    # presumably for a quick smoke test — confirm before a real run.
    parser.add_argument('--epochs', type=int, default=2,
                        help='Number of epochs to train.')
    # Initial learning rate.
    parser.add_argument('--lr', type=float, default=0.005,
                        help='Initial learning rate.')
    # Weight decay (L2 penalty on parameters).
    parser.add_argument('--weight_decay', type=float, default=5e-4,
                        help='Weight decay (L2 loss on parameters).')
    # Number of hidden units.
    parser.add_argument('--hidden', type=int, default=8,
                        help='Number of hidden units.')
    # Number of attention heads (multi-head attention).
    parser.add_argument('--nb_heads', type=int, default=8,
                        help='Number of head attentions.')
    # Dropout rate (1 - keep probability).
    parser.add_argument('--dropout', type=float, default=0.6,
                        help='Dropout rate (1 - keep probability).')
    # Negative slope for LeakyReLU.
    parser.add_argument('--alpha', type=float, default=0.2,
                        help='Alpha for the leaky_relu.')
    # Early-stopping patience (epochs without improvement).
    parser.add_argument('--patience', type=int, default=100, help='Patience')

    # Parse with an empty argv so defaults always apply (see docstring).
    args = parser.parse_args(args=[])
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # Seed every RNG source so results are deterministic.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    return args
