import argparse
import os.path as osp
import os
def parse_opt(argv=None):
    """Build and parse the command-line options for this training run.

    Args:
        argv: optional list of argument strings. When ``None`` (the default,
            preserving the original behavior) arguments are read from
            ``sys.argv``; passing a list makes the function usable from
            tests and other code.

    Returns:
        argparse.Namespace with all parsed options (also printed to stdout).
    """
    arg = argparse.ArgumentParser()
    # init
    # NOTE(review): '--decive' is a typo for '--device', but renaming the flag
    # would break existing callers that read args.decive — kept as-is.
    arg.add_argument('--decive', type=str, default='cpu')
    # BUGFIX: default was the string '0'; argparse does NOT apply `type` to
    # defaults, so args.gpuid would be a str when the flag is omitted.
    arg.add_argument('--gpuid', type=int, default=0)
    arg.add_argument('--print_step', type=int, default=10)
    # BUGFIX: same str-default issue as --gpuid; default was '100'.
    arg.add_argument('--n_iter', type=int, default=100)
    # NOTE(review): type=str but default is int 0 — presumably 0 means "do not
    # load"; left unchanged in case callers compare against 0. TODO confirm.
    arg.add_argument('--load_ckpt', type=str, default=0)

    # dataset

    arg.add_argument('--dataset', type=str, default='syn')
    #arg.add_argument('--batch_size', type=int, default=20)
    arg.add_argument('--data_dir', type=str, default='F:\\shape_data')
    arg.add_argument('--voca_name', type=str, default='data/vocabulary_72700.txt')
    arg.add_argument('--voca_size', type=int, default=72700)
    arg.add_argument('--this_dir', type=str, default=osp.dirname(__file__))

    # language_rnn
    # Language Encoder Setting
    arg.add_argument('--word_embedding_size', type=int, default=512, help='the encoding size of each token')
    arg.add_argument('--word_vec_size', type=int, default=512, help='further non-linear of word embedding')
    arg.add_argument('--word_drop_out', type=float, default=0.5, help='word drop out after embedding')
    arg.add_argument('--bidirectional', type=int, default=2, help='bi-rnn, 1 for one-direction')
    arg.add_argument('--rnn_hidden_size', type=int, default=512, help='hidden size of LSTM')
    arg.add_argument('--rnn_type', type=str, default='lstm', help='rnn, gru or lstm')
    arg.add_argument('--rnn_drop_out', type=float, default=0.2, help='dropout between stacked rnn layers')
    arg.add_argument('--rnn_num_layers', type=int, default=1, help='number of layers in lang_encoder')
    arg.add_argument('--variable_lengths', type=int, default=0, help='use variable length to encode')

    # Optimization: General
    arg.add_argument('--max_iters', type=int, default=50000, help='max number of iterations to run')
    arg.add_argument('--sample_ratio', type=float, default=0.3,
                        help='ratio of same-type objects over different-type objects')
    arg.add_argument('--batch_size', type=int, default=2, help='batch size in number of images per batch')
    arg.add_argument('--grad_clip', type=float, default=0.1, help='clip gradients at this value')
    arg.add_argument('--seq_per_ref', type=int, default=3, help='number of expressions per object during training')
    arg.add_argument('--learning_rate_decay_start', type=int, default=8000,
                        help='at what iter to start decaying learning rate')
    arg.add_argument('--learning_rate_decay_every', type=int, default=8000,
                        help='every how many iters thereafter to drop LR by half')
    arg.add_argument('--optim_epsilon', type=float, default=1e-8,
                        help='epsilon that goes into denominator for smoothing')
    arg.add_argument('--learning_rate', type=float, default=4e-4, help='learning rate')
    arg.add_argument('--optim_alpha', type=float, default=0.8, help='alpha for adam')
    arg.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')

    # Evaluation/Checkpointing
    arg.add_argument('--num_sents', type=int, default=-1,
                        help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
    arg.add_argument('--save_checkpoint_every', type=int, default=2000, help='how often to save a model checkpoint?')
    arg.add_argument('--checkpoint_path', type=str, default='output', help='directory to save models')
    arg.add_argument('--language_eval', type=int, default=0, help='Evaluate language as well (1 = yes, 0 = no)?')
    arg.add_argument('--losses_log_every', type=int, default=25,
                        help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
    arg.add_argument('--load_best_score', type=int, default=1,
                        help='Do we load previous best score when resuming training.')
    # misc
    arg.add_argument('--id', type=str, default='0', help='an id identifying this run/job.')
    arg.add_argument('--seed', type=int, default=24, help='random number generator seed to use')

    # argv=None falls back to sys.argv, matching the original behavior.
    args = arg.parse_args(argv)
    print(args)
    return args

def get_fire(**options):
    """Print the received keyword arguments and return them as a dict."""
    print(options)
    return options

