import numpy as np
import json

from tensorflow import keras
import tensorflow.keras.backend as K


from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer,load_vocab
from bert4keras.optimizers import Adam,extend_with_weight_decay

from utils.bert_info import  BertInfo
from utils.loss import SimbertModelLossLayer
from utils.generator import SimBertGenerator,SimBert_gentext
from utils.datagenerator import SimBertDataGenerator
from utils.dataloader import LCQMCLoader

# --- Training hyperparameters ---
maxlen = 32  # maximum token length per sentence fed to the tokenizer/generators
batch_size = 128  # samples per training batch
steps_per_epoch = 1000  # NOTE(review): unused — the fit call at the bottom hardcodes steps_per_epoch=1
epochs=10000  # NOTE(review): unused — the fit call at the bottom hardcodes epochs=1


bert_info_obj = BertInfo()
# Load and prune the vocabulary (simplified=True drops unused tokens),
# then build the tokenizer on the reduced dict.
token_dict, keep_tokens = load_vocab(
    dict_path=bert_info_obj.dict_path,
    simplified=True,
    startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],  # special tokens kept at the front of the vocab
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)

# Build BERT in UniLM mode so one network serves both encoding and seq2seq.
# bert_model has two outputs: [0] the pooled CLS vector, [1] the per-token
# output of the LM head.
bert_model = build_transformer_model(
    bert_info_obj.config_path,
    checkpoint_path=bert_info_obj.checkpoint_path,
    with_pool='linear',       # linear-pooled CLS output, used as the sentence embedding
    application='unilm',      # UniLM attention mask: encoder + decoder in one model
    keep_tokens=keep_tokens,  # keep only the pruned vocab tokens, shrinking the embedding table
    return_keras_model=False  # return the bert4keras wrapper object, not the raw keras model
)

# encoder: sentence -> CLS embedding; seq2seq: sentence -> token distributions
encoder = keras.models.Model(bert_model.model.inputs,bert_model.outputs[0])
seq2seq = keras.models.Model(bert_model.model.inputs,bert_model.outputs[1])

# Attach the SimBERT loss as a layer over the model's inputs + outputs.
# [2, 3] are presumably indices into the concatenated inputs+outputs list
# (which tensors the loss reads) — TODO confirm against SimbertModelLossLayer.
outputs = SimbertModelLossLayer([2,3])(bert_model.model.inputs + bert_model.model.outputs)


model = keras.models.Model(bert_model.inputs,outputs)


# AdamW (weight-decay) variant left disabled; plain Adam is used below.
#damW = extend_with_weight_decay(Adam,'AdamW')
# The loss is added inside SimbertModelLossLayer, so compile() takes no loss argument.
optimizer = Adam(learning_rate=2e-6)
model.compile(optimizer=optimizer)
model.summary()



# Merge the train/valid/test splits of LCQMC into one training pool.
lcqmc_dataset = LCQMCLoader()
total_data = []
total_data.extend(lcqmc_dataset.get_train_data())
total_data.extend(lcqmc_dataset.get_valid_data())
total_data.extend(lcqmc_dataset.get_test_data())
train_generator = SimBertDataGenerator(total_data,tokenizer,maxlen, batch_size)

# Autoregressive text generator driven by the seq2seq head.
sentence_generator = SimBertGenerator(
    start_id=tokenizer._token_start_id,  # presumably the [CLS] id — verify against Tokenizer
    end_id = tokenizer._token_end_id,    # presumably the [SEP] id
    maxlen=maxlen,
    tokenizer=tokenizer,
    generator_model=seq2seq
)


def just_show():
    """Pick a few random training samples and print generated paraphrases.

    Best-effort: any generation error is printed and skipped so training
    is never interrupted.
    """
    pool = train_generator.some_samples
    picked = [np.random.choice(pool) for _ in range(3)]
    for sentence in picked:
        try:
            print(u'原句子：%s' % sentence)
            print(u'同义句子：')
            paraphrases = SimBert_gentext(sentence, sentence_generator, model, tokenizer, 10, 10)
            print(paraphrases)
            print()
        except Exception as err:
            # keep going even if one sample fails to generate
            print(err)

class Evaluate(keras.callbacks.Callback):
    """Keras callback: checkpoint the model on a new lowest training loss,
    then print a few sample paraphrases after every epoch."""

    def __init__(self):
        # Fix: the base Callback.__init__ must run so Keras can attach
        # model/params to this callback.
        super().__init__()
        self.lowest = 1e10  # best (lowest) loss seen so far

    def on_epoch_end(self, epoch, logs=None):
        """Save a checkpoint when loss improves; always show samples.

        Fix: original indexed logs['loss'] directly, which raises when
        logs is None or lacks a 'loss' entry.
        """
        loss = (logs or {}).get('loss')
        if loss is not None and loss <= self.lowest:
            self.lowest = loss
            model.save('./model_saved/best_model')
        just_show()

if __name__ == '__main__':
    evaluate = Evaluate()

    # Fix: Model.fit_generator is deprecated and removed in modern TF2;
    # Model.fit accepts Python generators directly with the same semantics.
    # NOTE(review): steps_per_epoch/epochs are hardcoded to 1 here (smoke-test
    # values?) while the module-level constants steps_per_epoch=1000 and
    # epochs=10000 go unused — confirm which is intended.
    model.fit(
        train_generator.forfit(),
        steps_per_epoch=1,
        epochs=1,
        callbacks=[evaluate]
    )