﻿
import argparse  # command-line argument parser

def _str2bool(value: str) -> bool:
    """Parse a command-line boolean string ('True'/'False', case-insensitive).

    argparse's ``type=bool`` is a well-known trap: ``bool('False')`` is True
    because any non-empty string is truthy. This converter keeps the
    ``--flag True`` / ``--flag False`` CLI syntax but parses it correctly.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError(f'expected a boolean value, got {value!r}')


def get_args():
    """Build and return the ArgumentParser for CIFAR10 training options.

    Returns:
        argparse.ArgumentParser: the configured parser; the caller is
        expected to invoke ``parse_args()`` on it.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--batch_size', type=int, default=128, help='input batch size for training (default: 128)')
    parser.add_argument('--num_epochs', type=int, default=10, help='number of epochs to train (default: 10)')
    parser.add_argument('--device', type=str, default='cuda', help='device to train (default: cuda)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 0.001)')
    parser.add_argument('--work_dir', type=str, default='runs/CIFAR10_experiment_1', help='training directory (default: runs/CIFAR10_experiment_1)')
    parser.add_argument('--seed', type=int, default=123, help='random seed (default: 123)')
    parser.add_argument('--model', type=str, default='MLP_3_BN', help='model to train (can choose MLP_3,MLP_3MAXm,MLP_6,MLP_9,MLP_3_BN,MLP_3_BN_XavierInit,MLP_3_BN_KaimingInit)')
    # type=bool was broken here: bool('False') == True, so '--L2 False'
    # silently enabled L2. _str2bool keeps the same CLI syntax but parses it.
    parser.add_argument('--L2', type=_str2bool, default=False, help='whether to use L2 regularization (default: False)')
    parser.add_argument('--weight_decay', type=float, default=0.001, help='weight decay (default: 0.001)')

    # Dropout-related parameters
    parser.add_argument('--dropout_rate', type=float, default=0.5, help='dropout rate (default: 0.5)')
    # Early-stopping parameters
    parser.add_argument('--early_stop_delta', type=float, default=0.1, help='early stop delta (default: 0.1);')
    parser.add_argument('--early_stop_patience', type=int, default=5, help='early stop patience (default: 5);')

    # Optimizer selection
    parser.add_argument('--optim', type=str, default='Adam', help='optimizer to use (default: Adam), can choose Adam, SGD...')
    # NOTE(review): the help text says "(default: True)" but the actual default
    # is the string 'foreach' — the intended semantics are unclear from here;
    # confirm against the call site and consider _str2bool if it is a boolean.
    parser.add_argument('--foreach', type=str, default='foreach', help='whether to use foreach (default: True)')

    return parser
