class ModelConfig:
    """Hyperparameters describing the caption-generation model itself."""

    def __init__(self):
        # Vocabulary and weight initialisation.
        self.vocab_size = 9004          # number of distinct tokens
        self.batch_size = 128
        self.initializer_scale = 0.08   # uniform init range for weights

        # Encoder / decoder dimensions.
        self.image_feature_size = 2048  # CNN feature vector length
        self.num_lstm_units = 512
        self.embedding_size = 512

        # Regularisation and sequence length.
        self.lstm_drop_keep_prob = 0.7  # dropout keep probability
        self.padded_length = 25         # captions padded/truncated to this

        # Special token ids: padding, start-of-caption, end-of-caption.
        self._null, self._start, self._end = 0, 1, 2


class TrainingConfig:
    """Hyperparameters controlling the optimisation loop."""

    def __init__(self):
        # TODO: confirm this matches the actual dataset size.
        self.num_examples_per_epoch = 400000
        self.optimizer = 'SGD'

        # Fixed: attribute was misspelled "initial_learning_rete".
        self.initial_learning_rate = 1.0
        # Backward-compat alias for the historical misspelling; prefer
        # `initial_learning_rate` in new code.
        self.initial_learning_rete = self.initial_learning_rate
        self.learning_rate_decay_factor = 0.5
        self.num_epochs_per_decay = 8.0

        self.clip_gradients = 5.0       # gradient-norm clipping threshold
        self.total_num_epochs = 5

        # NOTE(review): batch_size is also set in ModelConfig (128) — keep
        # the two in sync. `epochs` vs `total_num_epochs` looks redundant;
        # verify which one the training loop actually reads.
        self.batch_size = 128
        self.epochs = 15

        # Checkpointing / logging cadence (in training steps).
        self.num_checkpoints = 5
        self.display_every = 10
        self.checkpoint_every = 100


class DataConfig:
    """File-system locations for the raw Flickr8k data and derived artifacts."""

    def __init__(self):
        # Common directory prefixes (values are identical to spelling
        # every path out in full).
        processed = './data/processed_data/'
        text_dir = './data/Flickr8k_text/'

        self.image_path_to_vector_file = processed + 'image_path_to_vector.npy'

        self.saved_folder = processed
        self.image_folder = './data/Flickr8k_Dataset/'
        self.token_file = text_dir + 'Flickr8k.token.txt'
        self.train_images_file = text_dir + 'Flickr_8k.trainImages.txt'
        self.val_images_file = text_dir + 'Flickr_8k.devImages.txt'
        self.test_images_file = text_dir + 'Flickr_8k.testImages.txt'

        # Vocabulary files produced by preprocessing.
        self.vocab_file = processed + 'vocab.txt'
        self.vocab_vectors_file = processed + 'vocab_vectors.txt'
        self.char_to_id_file = processed + 'char_to_id.txt'
        self.id_to_char_file = processed + 'id_to_char.txt'


class ExtractionConfig:
    """Paths and settings for the image feature-extraction step."""

    def __init__(self):
        # Directory holding classify_image_graph_def.pb.
        self.model_dir = './data/'
        self.img_dir = './data/Flickr8k_Dataset/'
        # Extracted feature vectors are saved here as a .npy mapping.
        self.save_dir = './data/processed_data/image_path_to_vector.npy'

        self.pre_trained_folder = './data/pre_trained_data/'
        # NOTE(review): purpose of `flag` is not visible here — presumably a
        # progress/report interval of 1000 images; confirm against the caller.
        self.flag = 1000
