import os

'''
This file is taken and modified from R-Net by HKUST-KnowComp
https://github.com/HKUST-KnowComp/R-Net
'''


class Config:
    """Configuration for QANet training/evaluation on SQuAD v1.1.

    Every setting is a class attribute, so callers read e.g.
    ``Config.batch_size`` without instantiating.  NOTE: evaluating this
    class body also creates the output directories as a side effect —
    preserved from the original file.
    """

    home = "/data/qanet/"                    # root for all data and outputs
    # Same value as the original ``home + "train"`` (home ends with "/"),
    # but built consistently with every other path in this class.
    train_dir = os.path.join(home, "train")  # per-model training outputs

    # SQuAD v1.1 inputs; the dev set is reused as the test set.
    train_file = os.path.join(home, "data", "squad", "train-v1.1.json")
    dev_file = os.path.join(home, "data", "squad", "dev-v1.1.json")
    test_file = os.path.join(home, "data", "squad", "dev-v1.1.json")
    glove_word_file = os.path.join(home, "data", "glove", "glove.840B.300d.txt")

    model_name = "FRC"
    dir_name = os.path.join(train_dir, model_name)
    # Fix: the original did exists()+mkdir (racy, and os.mkdir fails when a
    # parent is missing), then joined train_dir with the already-absolute
    # dir_name — which os.path.join silently collapses back to dir_name.
    # makedirs(..., exist_ok=True) creates missing parents and is race-free.
    os.makedirs(dir_name, exist_ok=True)

    target_dir = os.path.join(home, "data")        # preprocessed artifacts
    log_dir = os.path.join(dir_name, "event")      # event/summary logs
    save_dir = os.path.join(dir_name, "model")     # model checkpoints
    answer_dir = os.path.join(dir_name, "answer")  # predicted answers
    train_record_file = os.path.join(target_dir, "train.rec")
    dev_record_file = os.path.join(target_dir, "dev.rec")
    test_record_file = os.path.join(target_dir, "test.rec")
    word_emb_file = os.path.join(target_dir, "word_emb.json")
    char_emb_file = os.path.join(target_dir, "char_emb.json")
    train_eval = os.path.join(target_dir, "train_eval.json")
    dev_eval = os.path.join(target_dir, "dev_eval.json")
    test_eval = os.path.join(target_dir, "test_eval.json")
    dev_meta = os.path.join(target_dir, "dev_meta.json")
    test_meta = os.path.join(target_dir, "test_meta.json")
    word_dictionary = os.path.join(target_dir, "word_dictionary.json")
    char_dictionary = os.path.join(target_dir, "char_dictionary.json")
    answer_file = os.path.join(answer_dir, "answer.json")

    # Create the remaining output directories (idempotent, race-free).
    os.makedirs(target_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(answer_dir, exist_ok=True)

    glove_char_size = 94  # Corpus size for Glove
    glove_word_size = int(2.2e6)  # Corpus size for Glove
    glove_dim = 300  # Embedding dimension for Glove
    char_dim = 64  # Embedding dimension for char

    para_limit = 300  # Limit length for paragraph
    ques_limit = 20  # Limit length for question
    ans_limit = 30  # Limit length for answers
    test_para_limit = 1000  # Limit length for paragraph in test file
    test_ques_limit = 100  # Limit length for question in test file
    char_limit = 16  # Limit length for character
    word_count_limit = -1  # Min count for word
    char_count_limit = -1  # Min count for char

    capacity = 15000  # Batch size of dataset shuffle
    num_threads = 4  # Number of threads in input pipeline
    is_bucket = False  # build bucket batch iterator or not
    bucket_range = [40, 401, 40]  # the range of bucket

    batch_size = 16  # Batch size
    num_steps = 60000  # Number of steps
    checkpoint = 1000  # checkpoint to save and evaluate the model
    period = 100  # period to save batch loss
    val_num_batches = 150  # Number of batches to evaluate the model
    dropout = 0.1  # Dropout prob across the layers
    grad_clip = 5.0  # Global Norm gradient clipping rate
    learning_rate = 0.001  # Learning rate
    decay = 0.9999  # Exponential moving average decay
    l2_norm = 3e-7  # L2 norm scale
    hidden = 96  # Hidden size
    num_heads = 4  # Number of heads in self attention
    early_stop = 10  # Checkpoints for early stop
    epochs = 20

    # Extensions (uncomment corresponding code in download.sh to download
    # the required data).
    glove_char_file = os.path.join(home, "data", "glove", "glove.840B.300d-char.txt")
    pretrained_char = False  # Whether to use pretrained character embedding

    fasttext_file = os.path.join(home, "data", "fasttext", "wiki-news-300d-1M.vec")
    use_fasttext = False  # Whether to use fasttext


