import tensorflow.keras.backend as K
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

from utils.bert_info import BertInfo
from utils.dataloader import QADataLoader
from utils.loss import LanguageModelLossLayer
from utils.generator import ReadingComprehension
from utils.callback import LossSaveCallback
from utils.datagenerator import QADataGenerator

from bert4keras.tokenizers import load_vocab,Tokenizer
from bert4keras.models import build_transformer_model


# Sequence-length budget (in tokens) for each field of a QA example.
max_p_len = 256   # passage (context) length
max_q_len = 64    # question length
max_a_len = 32    # answer length
max_qa_len = max_q_len + max_a_len  # combined question+answer segment length
batch_size = 64
epochs = 8

# Load the BERT vocabulary, keeping only a simplified subset of tokens
# (simplified=True prunes the vocab; keep_tokens maps the pruned ids back
# to the original checkpoint's embedding rows). The listed special tokens
# are forced to the front of the vocab.
bert_info_object = BertInfo()
token_dict,keep_tokens = load_vocab(
    dict_path=bert_info_object.dict_path,
    simplified=True,
    startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]']
)
tokenizer = Tokenizer(token_dict,do_lower_case=True)


# Build a truncated (3-layer) BERT in UniLM mode, which applies the
# seq2seq attention mask so the model can be used for generation.
model = build_transformer_model(
    config_path= bert_info_object.config_path,
    checkpoint_path= bert_info_object.checkpoint_path,
    application='unilm',
    keep_tokens=keep_tokens,  # remap embeddings to the simplified vocab
    num_hidden_layers = 3,  # only the first 3 transformer layers are loaded
)

# Attach the language-model loss as a layer over (inputs + outputs).
# NOTE(review): the argument `2` is presumably the index of the segment-id
# input used to mask out the passage/question part of the loss — confirm
# against LanguageModelLossLayer's definition.
output = LanguageModelLossLayer(2)(model.inputs + model.outputs)

# Rewrap so the loss layer's output is the model output; no loss is passed
# to compile() because the loss is added inside LanguageModelLossLayer.
model = Model(model.inputs,output)
model.compile(optimizer=Adam(1e-5))
model.summary()


# Answer decoder built on top of the trained generator model.
# start_id=None: decoding starts from the encoded question/passage rather
# than a fixed start token; generation stops at the tokenizer's [SEP] id.
reader = ReadingComprehension(
    start_id=None,
    end_id=tokenizer._token_end_id,  # NOTE(review): private attr of bert4keras Tokenizer
    max_a_len= max_a_len,
    max_q_len = max_q_len,
    max_p_len = max_p_len,
    generator_model=model,
    tokenizer=tokenizer
)

# Callback that tracks training loss and saves weights under ./best_model.
# NOTE(review): exact save criterion depends on LossSaveCallback — confirm.
evaluator = LossSaveCallback('./best_model')

if __name__ == '__main__':
    # Load the QA corpus and build a batched training generator over a
    # 6400-sample subset (keeps the run short; enlarge for full training).
    qa_data = QADataLoader()
    train_generator = QADataGenerator(
        qa_data.get_train_data()[:6400],
        tokenizer,
        max_p_len,
        max_qa_len,
        batch_size,
    )

    # Model.fit accepts Python generators in TF2; fit_generator is
    # deprecated and removed in recent TensorFlow releases.
    model.fit(
        train_generator.forfit(),
        steps_per_epoch=100,  # NOTE(review): use len(train_generator) for a full epoch
        epochs=epochs,
        callbacks=[evaluator]
    )


