from tensorflow.keras import backend as K
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam


from utils.dataloader import JinyongDataLoader
from utils.bert_info import BertInfo
from utils.datagenerator import BertGenTextDataGenerator
from utils.loss import LanguageModelLossLayer
from utils.generator import BertGenTextGenerator
from utils.callback import LossSaveCallback


from bert4keras.tokenizers import Tokenizer,load_vocab
from bert4keras.models import build_transformer_model

import tqdm
import numpy as np


# Training hyperparameters.
maxlen = 256            # max segment length in tokens (leaves room for [CLS]/[SEP])
batch_size = 16         # sequences per training batch
steps_per_epoch = 1000  # batches drawn per keras "epoch"
epochs = 10000          # effectively train-forever; rely on callbacks to save


# Paths to the pretrained BERT assets (vocab/config/checkpoint).
bert_info_obj = BertInfo()

# Load and simplify the vocabulary, then build the tokenizer.
# simplified=True drops tokens unused in the target corpus; keep_tokens maps
# the reduced vocab ids back to rows of the original embedding matrix, and the
# listed special tokens are forced to stay at the front of the vocab.
token_dict ,keep_tokens = load_vocab(
    dict_path = bert_info_obj.dict_path,
    simplified=True,
    startswith=['[PAD]','[UNK]','[CLS]','[SEP]']
)
tokenizer = Tokenizer(token_dict,do_lower_case=True)

# Build a single-layer BERT in unidirectional language-model mode
# (application='lm'), remapping its embeddings to the simplified vocabulary
# via keep_tokens. num_hidden_layers=1 keeps the model small/fast.
model = build_transformer_model(
    config_path=bert_info_obj.config_path,
    checkpoint_path=bert_info_obj.checkpoint_path,
    application='lm',
    keep_tokens=keep_tokens,
    num_hidden_layers = 1,
)
# Wrap the LM loss as a layer over (token_ids, predictions) so the graph
# carries its own loss and compile() needs no external loss function.
output = LanguageModelLossLayer(1)([model.inputs[0],model.outputs[0]])

model = Model(model.inputs,output)
# No loss= argument: the loss was added inside LanguageModelLossLayer.
model.compile(optimizer=Adam(1e-5))
model.summary()

# Autoregressive text generator wrapping the model above.
# start_id=None: decoding is seeded by the prompt's own tokens;
# end_id is the tokenizer's [SEP] id, presumably the stop token — TODO confirm
# against BertGenTextGenerator's implementation.
story_completion = BertGenTextGenerator(
    start_id=None,
    end_id=tokenizer._token_end_id,
    maxlen=maxlen,
    generator_model=model,
    tokenizer=tokenizer
)

def just_show():
    """Print the model's continuations for three fixed sample prompts.

    Used as a qualitative progress check during training; output goes to
    stdout only.
    """
    prompts = (
        u'当晚两人在一家小客店中宿歇。张无忌躺在炕上，越想越是担心，走到赵敏窗外，但听她呼吸调匀，正自香梦沉酣。',
        u'虚竹飞身跃上松树的枝干，只见段延庆的钢杖深深嵌在树枝之中，全凭一股内力粘劲，挂住了下面四人，内力之深厚，实是非同小可。虚竹伸左手抓住钢杖，提将上来。',
        u'杨过居住在侠客岛，是令狐冲的弟子，武器是金蛇剑。',
    )
    for prompt in prompts:
        completions = story_completion.generate(prompt)
        print(u'输入: %s' % prompt)
        print(u'结果: %s\n' % ('\n'.join(completions)))

if __name__ == '__main__':
    # train_data: list of novels, each presumably a list of sentences — the
    # segmentation loop below indexes sentences by position. TODO confirm
    # against JinyongDataLoader.
    jinyong_data = JinyongDataLoader()
    train_data = jinyong_data.get_train_data()

    # Build overlapping training segments: starting from every sentence i,
    # concatenate following sentences until the segment would exceed
    # maxlen - 2 characters (room for the [CLS]/[SEP] tokens).
    data = []
    pbar = tqdm.tqdm(desc=u'构建语料中', total=sum(len(n) for n in train_data))

    for novel in train_data:
        s = u''
        for i in range(len(novel)):
            for j in range(len(novel) - i):
                if len(s) + len(novel[i + j]) > maxlen - 2:
                    data.append(s)
                    s = u''
                    break
                else:
                    s += novel[i + j]
            pbar.update(1)
            # NOTE(review): j <= len(novel) - i - 1, so i + j < len(novel)
            # always holds and this break never fires; any partial segment
            # left in `s` is flushed by the `if s:` below instead. Kept as-is
            # to preserve the exact corpus; likely intended `>= len(novel) - 1`.
            if i + j >= len(novel):
                break
        if s:
            data.append(s)

    pbar.close()
    np.random.shuffle(data)

    train_data_generator = BertGenTextDataGenerator(data, tokenizer, maxlen, batch_size)
    # BUG FIX: pass the callable itself, not `just_show()`. The original
    # invoked just_show() once here (before any training) and handed its
    # return value (None) to the callback, so no samples were ever shown
    # during training.
    evaluator = LossSaveCallback('./gen/bestmode/', just_show)

    # NOTE(review): fit_generator is deprecated in TF2 (model.fit accepts
    # generators); left unchanged in case this runs on an older TF/keras.
    model.fit_generator(
        train_data_generator.forfit(),
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        callbacks=[evaluator]
    )