import argparse

idx = 0  # NOTE(review): not referenced anywhere in this file — possibly a leftover; TODO confirm no external module imports it before removing
def config_opts(argv=None):
    """Build and parse the command-line flags for the felix tagging model.

    Args:
        argv: Optional list of argument strings to parse instead of
            ``sys.argv[1:]`` (useful for testing). ``None`` preserves the
            original behavior of reading the process command line.

    Returns:
        argparse.Namespace holding all parsed options.
    """

    def _str2bool(value):
        """Convert a command-line string to a real boolean.

        Raises:
            argparse.ArgumentTypeError: if the string is not a recognized
                true/false spelling.
        """
        # BUG FIX: the original used type=bool, but bool('False') is True —
        # every non-empty string flipped the flag on. This converter parses
        # the usual spellings explicitly; defaults are unchanged.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(
            'Boolean value expected, got %r' % (value,))

    parser = argparse.ArgumentParser(description='felix flags')

    parser.add_argument('--task', default='tagging', type=str,
                        choices=['tagging', 'predict'],
                        help='task train tagging, if predict, both')

    parser.add_argument('--model_type', default='bert_tagging', type=str)

    parser.add_argument('--config_name', default='./config/bert_tagging.json',
                        type=str,
                        help='Path to the config file for the tagging model.')

    # Known checkpoint locations; index 0 (the pretrained BERT weights) is
    # the default for --init_checkpoint.
    ckchoices = ['./chinese-bert-wwm-ext/pytorch_model.bin',
                 './output/ckpts/mate_output/tagging_20241110_140530/checkpoint-epoch-0-steps-800000/']

    parser.add_argument('--resource_dir',
                        default='D:/workspace/res_input_output/resource/',
                        type=str, help='Path to resource dir.')
    parser.add_argument('--init_checkpoint',
                        default=ckchoices[0],
                        type=str,
                        help='init from a pretrain model train or evaluate')

    # Mode switches.
    parser.add_argument('--do_train', action='store_true', help='do train')
    parser.add_argument('--do_eval', action='store_true', help='do eval')
    parser.add_argument('--do_pipline', action='store_true',
                        help='cls disf pipline')
    parser.add_argument('--do_predict', action='store_true',
                        help='do predict pipline')

    # Data files.
    parser.add_argument('--train_file', default='./res_in/', type=str,
                        help='train file ')
    parser.add_argument('--dev_file', default='./output/dev.txt', type=str,
                        help='dev file')

    # Output locations.
    parser.add_argument('--output_dir', default='./output/ckpts/mate_output',
                        type=str, required=False,
                        help='the output dir of the model preds and checkpoints')
    parser.add_argument('--overwrite_outputdir', default=True, type=_str2bool,
                        help='overwrite the content of the output dir')

    parser.add_argument('--no_cuda', default=False, type=_str2bool,
                        help='Avoid using CUDA when available')

    parser.add_argument('--max_seq_length', default=384, type=int, help='')
    parser.add_argument('--worker', default=0, type=int, help='')
    parser.add_argument('--max_predictions_per_seq', default=10, type=int,
                        help='Maximum predictions per sequence_output.')
    parser.add_argument('--warmup_steps', default=10000, type=int,
                        help='Warmup steps for Adam weight decay optimizer.')
    parser.add_argument('--label_map_file', default='./config/label_map.json',
                        type=str, help='Path to the label map file. ')
    parser.add_argument('--vocab_file', default='./resource/vocab.txt',
                        type=str, help='Path to the BERT vocabulary file.')
    parser.add_argument('--predict_batch_size', default=32, type=int,
                        help='Batch size for the prediction of insertion and '
                             'tagging models.')

    parser.add_argument('--max_mask', default=5, type=int,
                        help='The maximum number of MASKs the model can create '
                             'per input token when '
                             '`use_open_vocab == True refer to felix_config.json '
                             'num_classes - 4`.')

    # Prediction flags.
    parser.add_argument('--predict_input_file',
                        default='/evafs/angzhao/DisfDetection/origin/train/patch2_dev4.tsv.raw.txt.pair',
                        type=str,
                        help='Path to the input file containing examples for which to'
                             'compute predictions.')
    parser.add_argument('--predict_output_file',
                        default='./output/patch2_dev4.pred', type=str,
                        help='Path to the output file for predictions.')

    # Training flags.
    parser.add_argument('--use_weighted_labels', default=False, type=_str2bool,
                        help='Whether different labels were given different '
                             'weights. ')

    parser.add_argument('--local_rank', type=int, default=-1,
                        help='For distributed training: local_rank')
    parser.add_argument('--server_ip', type=str, default='',
                        help='For distant debugging.')
    parser.add_argument('--server_port', type=str, default='',
                        help='For distant debugging.')
    parser.add_argument('--max_steps', type=int, default=-1, help='max steps')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help='Num of updates steps to accumulate before '
                             'performing a backward/update pass.')
    parser.add_argument('--weight_decay', default=0.0, type=float,
                        help='Weight decay if we apply some.')
    parser.add_argument('--adam_epsilon', default=1e-8, type=float,
                        help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float,
                        help='Max gradient norm.')
    parser.add_argument('--logging_steps', type=int, default=10000,
                        help='Log every X updates steps.')
    parser.add_argument('--save_steps', type=int, default=100000,
                        help='Save a checkpoint every X updates steps.')
    parser.add_argument('--per_gpu_train_batch_size', default=8, type=int,
                        help='Batch size per GPU/CPU for training.')
    parser.add_argument('--per_gpu_eval_batch_size', default=32, type=int,
                        help='Batch size per GPU/CPU for evaluation.')
    parser.add_argument('--num_train_epochs', default=5, type=int,
                        help='Total number of training epochs to perform.')
    parser.add_argument('--learning_rate', default=2e-6, type=float,
                        help='The initial learning rate for Adam. 2e-5')
    parser.add_argument('--evaluate_during_training', default=False,
                        type=_str2bool, help='evaluate during training')
    parser.add_argument('--overwrite', default=True, type=_str2bool,
                        help='overwrite the ckpt')
    parser.add_argument('--fp16', default=False, type=_str2bool,
                        help='mixd precision')
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in "
                             "['O0', 'O1', 'O2', and 'O3'].")
    parser.add_argument('--seed', default=42, type=int)

    parser.add_argument('--note', default='desc note', type=str)

    # Parsing an explicit argv (when given) keeps this function usable from
    # tests and other callers; None falls back to sys.argv as before.
    args = parser.parse_args(argv)

    return args
