import glob
import numpy as np
from bert4keras.backend import keras,K
from bert4keras.layers import Loss
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer,load_vocab
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding,open
from bert4keras.snippets import DataGenerator,AutoRegressiveDecoder
from keras.models import Model


# --- Training configuration ---
maxlen = 256          # max combined token length of content + title
batch_size = 16
steps_per_epoch = 1
epochs = 1

import os
import sys

# Pretrained Chinese BERT checkpoint location.
# NOTE: the original used `'win' in sys.platform`, which also matches
# 'darwin' (macOS); startswith('win') only matches Windows platforms.
if sys.platform.startswith('win'):
    BertPath = r'E:\nlp-data\pretrain_model\bert\model_chinese'
else:
    BertPath = r'/data/nlp_data/bert/model_chinese/'

config_path = os.path.join(BertPath, 'bert_config.json')
checkpoint_path = os.path.join(BertPath, 'bert_model.ckpt')
dict_path = os.path.join(BertPath, 'vocab.txt')


# Training samples: THUCNews dataset, one sample per txt file.
txts = glob.glob('datasets/THUCNews/*/*.txt')

# Build a simplified vocabulary (drops rarely used tokens) plus the mapping
# back to the original token ids, so pretrained embeddings can be reused.
# NOTE: the bert4keras keyword is `startswith`; the original `startwith`
# raises a TypeError at import time.
token_dict, keep_tokens = load_vocab(
    dict_path=dict_path,
    simplified=True,
    startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)

class data_generator(DataGenerator):
    """Yields training batches of (token_ids, segment_ids) for UniLM seq2seq.

    Each sample file's first line is the title (target) and the remaining
    lines are the content (source); they are encoded together as a
    content [SEP] title pair, truncated to `maxlen`.
    """
    def __iter__(self, random=False):
        batch_token_ids, batch_segment_ids = [], []
        for is_end, txt in self.sample(random):
            # Context manager closes the handle promptly — the original
            # `open(txt, ...).read()` leaked one open file per sample.
            with open(txt, encoding='utf-8') as f:
                text = f.read()
            lines = text.split('\n')
            if len(lines) > 1:
                title = lines[0]
                content = '\n'.join(lines[1:])
                token_ids, segment_ids = tokenizer.encode(
                    content, title, max_length=maxlen
                )
                batch_token_ids.append(token_ids)
                batch_segment_ids.append(segment_ids)

            if len(batch_token_ids) == self.batch_size or is_end:
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                yield [batch_token_ids, batch_segment_ids], None
                batch_token_ids, batch_segment_ids = [], []

class CrossEntropy(Loss):
    """Masked sparse cross-entropy for UniLM-style seq2seq training.

    Targets are the input tokens shifted left by one position; only
    positions whose segment mask is 1 (the title part) contribute.
    """
    def compute_loss(self, inputs, mask=None):
        y_true, y_mask, y_pred = inputs
        # Align prediction t with the token at position t + 1.
        shifted_true = y_true[:, 1:]
        shifted_mask = y_mask[:, 1:]
        shifted_pred = y_pred[:, :-1]

        token_loss = K.sparse_categorical_crossentropy(shifted_true, shifted_pred)
        masked_loss = token_loss * shifted_mask
        # Average over unmasked (title) positions only.
        return K.sum(masked_loss) / K.sum(shifted_mask)

# Build BERT with a UniLM (seq2seq) attention mask, restricted to the
# simplified vocabulary via keep_tokens.
model = build_transformer_model(
    config_path,
    checkpoint_path,
    application='unilm',
    keep_tokens=keep_tokens,
)

# Attach the masked cross-entropy loss; index 2 selects y_pred (the model
# output) as the layer's pass-through output inside CrossEntropy.
loss_output = CrossEntropy(2)(model.inputs + model.outputs)

model = Model(model.inputs, loss_output)
model.compile(optimizer=Adam(1e-5))
model.summary()

class AutoTitle(AutoRegressiveDecoder):
    """Seq2seq decoder that beam-searches a title token by token."""

    @AutoRegressiveDecoder.set_rtype('probas')
    def predict(self, inputs, output_ids, step):
        # Append the tokens generated so far; generated positions carry
        # segment id 1 (the "answer" segment in UniLM).
        token_ids, segment_ids = inputs
        full_tokens = np.concatenate([token_ids, output_ids], axis=1)
        full_segments = np.concatenate(
            [segment_ids, np.ones_like(output_ids)], axis=1
        )
        # Only the distribution over the last position is needed.
        return model.predict([full_tokens, full_segments])[:, -1]

    def generate(self, text, topk=1):
        # Reserve self.maxlen positions for the generated title.
        max_c_len = maxlen - self.maxlen
        token_ids, segment_ids = tokenizer.encode(text, max_length=max_c_len)
        best_ids = self.beam_search([token_ids, segment_ids], topk)
        return tokenizer.decode(best_ids)


autotitle = AutoTitle(start_id=None,end_id=tokenizer._token_end_id,maxlen=32)

def just_show():
    """Print a generated title for one fixed sample text (training-progress demo)."""
    sample = u'夏天来临，皮肤在强烈紫外线的照射下，晒伤不可避免，因此，晒后及时修复显得尤为重要，否则可能会造成长期伤害。专家表示，选择晒后护肤品要慎重，芦荟凝胶是最安全，有效的一种选择，晒伤严重者，还请及 时 就医 。'
    print(u'生成标题:', autotitle.generate(sample))
    print()


class Evaluate(keras.callbacks.Callback):
    """Saves model weights whenever training loss reaches a new low,
    then prints a demo title after every epoch."""

    def __init__(self):
        # The original skipped super().__init__(), leaving Callback base
        # state (self.params, self.model, ...) uninitialized.
        super(Evaluate, self).__init__()
        self.lowest = 1e10  # best (lowest) loss observed so far

    def on_epoch_end(self, epoch, logs=None):
        if logs['loss'] <= self.lowest:
            self.lowest = logs['loss']
            # Ensure the target directory exists; save_weights does not
            # create intermediate directories.
            os.makedirs("output", exist_ok=True)
            model.save_weights("output/seq2seq_autotitle.weights")

        just_show()


if __name__ == '__main__':
    weights_path = "output/seq2seq_autotitle.weights"
    if os.path.exists(weights_path):
        # Weights already trained: load them and run the demo directly.
        model.load_weights(weights_path)
        just_show()
    else:
        # Train from scratch, checkpointing on each new lowest loss.
        evaluator = Evaluate()
        train_generator = data_generator(txts, batch_size)

        model.fit_generator(
            train_generator.forfit(),
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=[evaluator],
        )



