import argparse


def _str2bool(value):
    """Convert a command-line string to a bool.

    argparse's ``type=bool`` is a well-known trap: any non-empty string
    (including ``'False'``) is truthy, so boolean flags could never be
    disabled from the command line.  This converter accepts the common
    spellings explicitly and rejects anything else.

    Raises:
        argparse.ArgumentTypeError: if *value* is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


def parse_opt(argv=None):
    """Build the training/evaluation option parser and parse arguments.

    Args:
        argv: optional list of argument strings.  When ``None`` (the
            default, and the previous behavior), arguments are read from
            ``sys.argv[1:]``.  Passing an explicit list makes the parser
            usable from tests and notebooks.

    Returns:
        argparse.Namespace holding every option defined below.
    """
    parser = argparse.ArgumentParser()

    # Data input settings
    parser.add_argument('--id', type=str, default='bert_feature_fuse', help='<swin_transformer> or <swin_diff_head_decode> or <swin_obj_dect_trans> or <swin_obj_dect_trans_rl2> or <feature_fusion> or <feature_fusion3_rl> or <bert_feature_fuse>or<bert_feature_fuse_rl>')
    parser.add_argument('--caption_model', type=str, default='bert_feature_fuse', help='<swin_transformer> or <swin_diff_head_decode> or <swin_obj_dect_trans> or <feature_fusion>,<feature_fusion2>,<feature_fusion3>,<bert_feature_fuse>')
    parser.add_argument('--input_json', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocotalk.json',
                        help='path to the json file connecting  additional info and vocab')
    parser.add_argument('--input_label_h5', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocotalk_label.h5',
                        help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--input_att_dir', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocobu_att',
                        help='path to the directory containing the preprocessed att feats')
    parser.add_argument('--input_fc_dir', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocobu_fc',
                        help='path to the directory containing the preprocessed fc feats')
    parser.add_argument('--input_rel_box_dir', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocobu_box_relative',
                        help="this directory contains the bboxes in relative coordinates for the corresponding image "
                             "features in --input_att_dir")
    parser.add_argument('--pretrained_glove', type=str,
                        default='/mnt/hdd0/home/fyc/Transformer/data/glove/glove.840B.300d.txt',
                        help="pretrained glove vector")
    parser.add_argument('--image_root', type=str, default='/mnt/hdd0/home/fyc/COCO_dataset', help='image path')

    # Model architecture settings
    parser.add_argument('--image_size', type=int, default=224, help='image size')
    parser.add_argument('--patch_size', type=int, default=4, help='patch size')
    parser.add_argument('--in_chans', type=int, default=3, help='channels of input images')
    parser.add_argument('--embed_dim', type=int, default=96)
    parser.add_argument('--patch_embed_dim', type=int, default=768)
    # NOTE: ``type=list`` would split a command-line string into characters;
    # ``nargs='+', type=int`` parses e.g. ``--swin_depths 2 2 6 2`` correctly.
    parser.add_argument('--swin_depths', nargs='+', type=int, default=[2, 4], help='depths of swin transformer')
    parser.add_argument('--swin_num_heads', nargs='+', type=int, default=[6, 12])
    parser.add_argument('--encode_num_layers', type=int, default=6, help='bbox encoder')
    parser.add_argument('--decoder_num_layers', type=int, default=6)
    parser.add_argument('--decoder_num_heads', type=int, default=8)
    parser.add_argument('--learning_rate', type=float, default=5e-4,
                        help='learning rate')
    parser.add_argument('--input_encoding_size', type=int, default=512,
                        help='the encoding size of each token in the vocabulary, and the image.')
    parser.add_argument('--rnn_size', type=int, default=2048,
                        help='size of the rnn in number of hidden nodes in each layer')
    parser.add_argument('--window_size', type=int, default=7, help='window size')
    parser.add_argument('--mlp_ratio', type=float, default=4.0, help='ratio of MLP')
    parser.add_argument('--drop_rate', type=float, default=0.1, help='model drop rate')
    parser.add_argument('--drop_path_rate', type=float, default=0.1)
    parser.add_argument('--drop_prob_lm', type=float, default=0.5,
                        help='strength of dropout in the Language Model RNN')
    # Boolean flags use _str2bool so '--qkv_bias False' actually yields False
    # (with type=bool, any non-empty string parsed as True).
    parser.add_argument('--qkv_bias', type=_str2bool, default=True, help='need bias for QKV?')
    # qk_scale is a numeric attention-scale override (None = use default
    # head_dim ** -0.5); it was previously mistyped as bool.
    parser.add_argument('--qk_scale', type=float, default=None)
    parser.add_argument('--att_feat_size', type=int, default=2048)
    parser.add_argument('--feature_fuse_size', type=int, default=1024)
    parser.add_argument('--patch_norm', type=_str2bool, default=True, help='patch norm')
    parser.add_argument('--ape', type=_str2bool, default=False, help='absolute position embedding')

    # Training schedule settings
    parser.add_argument('--batch_size', type=int, default=20, help='minibatch size')
    parser.add_argument('--max_epochs', type=int, default=45, help='number of epochs')
    parser.add_argument('--seq_length', type=int, default=16, help='default sequence length')
    parser.add_argument('--seq_per_img', type=int, default=5,
                        help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
    parser.add_argument('--grad_clip', type=float, default=0.1,  # 5.,
                        help='clip gradients at this value')
    parser.add_argument('--language_eval', type=int, default=1,
                        help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
    parser.add_argument('--train_only', type=int, default=0, help='if true then use 80k, else use 110k')
    parser.add_argument('--losses_log_every', type=int, default=25,
                        help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
    parser.add_argument('--use_checkpoint', type=_str2bool, default=False)
    parser.add_argument('--checkpoint_path', type=str, default='/mnt/hdd0/home/fyc/code/swin_transformer_checkpoint/bert_feature_fuse/',
                        help='directory to store checkpointed models')
    parser.add_argument('--save_checkpoint_every', type=int, default=3000,
                        help='how often to save a model checkpoint (in iterations)?')
    parser.add_argument('--start_from', type=str, default=None,
                        help="""continue training from saved model at this path. Path must contain files saved by previous training process:
                            'infos.pkl'         : configuration;
                            'checkpoint'        : paths to model file(s) (created by tf).
                                                  Note: this file contains absolute paths, be careful when moving files around;
                            'model.ckpt-*'      : file(s) with model definition (created by tf)
                            '/mnt/hdd0/home/fyc/code/swin_transformer_checkpoint/feature_fusion3/
                            /mnt/hdd0/home/fyc/code/swin_transformer_checkpoint/bert_feature_fuse_rl/'
                             """)
    parser.add_argument('--self_critical_after', type=int, default=30)
    parser.add_argument('--scheduled_sampling_start', type=int, default=0,
                        help='at what iteration to start decay gt probability')
    parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05,
                        help='How much to update the prob')
    parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25,
                        help='Maximum scheduled sampling prob.')
    parser.add_argument('--cached_tokens', type=str, default='coco-train-idxs',
                        help='Cached token file for calculating cider score during self critical training.')
    parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5,
                        help='every how many iterations thereafter to gt probability')
    parser.add_argument('--learning_rate_decay_start', type=int, default=0,
                        help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
    parser.add_argument('--learning_rate_decay_every', type=int, default=3,
                        help='every how many iterations thereafter to drop LR?(in epoch)')

    # Self-critical (RL) reward weights
    parser.add_argument('--cider_reward_weight', type=float, default=1,
                        help='The reward weight from cider')
    parser.add_argument('--bleu_reward_weight', type=float, default=0,
                        help='The reward weight from bleu4')

    # Optimizer settings
    parser.add_argument('--optim', type=str, default='adam',
                        help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam')
    parser.add_argument('--optim_alpha', type=float, default=0.9,
                        help='alpha for adam')
    parser.add_argument('--optim_beta', type=float, default=0.999,
                        help='beta used for adam')
    parser.add_argument('--optim_epsilon', type=float, default=1e-8,
                        help='epsilon that goes into denominator for smoothing')
    parser.add_argument('--weight_decay', type=float, default=0,
                        help='weight_decay')
    parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8,
                        help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--load_best_score', type=int, default=1,
                        help='Do we load previous best score when resuming training.')
    parser.add_argument('--noamopt', type=int, default=10000,
                        help='')
    parser.add_argument('--noamopt_warmup', type=int, default=10000,
                        help='')
    parser.add_argument('--noamopt_factor', type=float, default=1,
                        help='')
    parser.add_argument('--reduce_on_plateau', action='store_true', help='')

    args = parser.parse_args(argv)

    return args