import numpy as np

from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer
from bert4keras.snippets import AutoRegressiveDecoder

from utils.bert_info import GptInfo

gpt_info_object = GptInfo()

# Tokenizer with lowercasing; the two speaker tags mark dialogue turns.
tokenizer = Tokenizer(gpt_info_object.dict_path, do_lower_case=True)
speakers = [
    tokenizer.token_to_id(tag) for tag in ('[speaker1]', '[speaker2]')
]

# OpenAI-style GPT language model restored from the configured checkpoint.
model = build_transformer_model(
    config_path=gpt_info_object.config_path,
    checkpoint_path=gpt_info_object.checkpoint_path,
    model='gpt_openai',
)


class ChatBot(AutoRegressiveDecoder):
    """Dialogue bot that decodes replies via random top-k sampling."""

    @AutoRegressiveDecoder.wraps(default_rtype='probas')
    def predict(self, inputs, output_ids, states):
        """Return next-token probabilities for the current partial decode.

        All generated tokens belong to the replying speaker, whose segment
        id equals the last token of the prompt (the trailing speaker tag).
        """
        token_ids, segment_ids = inputs
        reply_segments = np.zeros_like(output_ids) + token_ids[0, -1]
        full_tokens = np.concatenate([token_ids, output_ids], 1)
        full_segments = np.concatenate([segment_ids, reply_segments], 1)
        return model.predict([full_tokens, full_segments])[:, -1]

    def response(self, texts, topk=5):
        """Generate a reply for the alternating dialogue history `texts`.

        Input layout (example):
          word embedding:     [CLS] [speaker1] 您 好 [speaker2] 您 好 [speaker1] 再 见 [SEP]
          segment embedding:  the speaker id of each token; the trailing
                              speaker tag of every utterance is attributed
                              to the *next* speaker
          position embedding: 0, 1, 2, ...
        """
        token_ids = [tokenizer._token_start_id, speakers[0]]
        segment_ids = [tokenizer._token_start_id, speakers[0]]
        for turn, utterance in enumerate(texts):
            current = speakers[turn % 2]
            following = speakers[(turn + 1) % 2]
            # Strip [CLS]/[SEP] from the utterance and append the tag of
            # whoever speaks next.
            ids = tokenizer.encode(utterance)[0][1:-1] + [following]
            token_ids += ids
            segment_ids += [current] * len(ids)
            segment_ids[-1] = following
        results = self.random_sample([token_ids, segment_ids], 1, topk)
        return tokenizer.decode(results[0])

chatbot = ChatBot(start_id=None, end_id=tokenizer._token_end_id, maxlen=32)

import time


def main():
    """Interactive chat REPL.

    Reads user turns from stdin, prints the model's reply and per-reply
    latency, and exits when the user enters an empty line. Only the last
    8 utterances (4 user/bot exchanges) are kept as decoding context.
    """
    history = []  # alternating user/bot utterances, oldest first
    text = input("你：")
    while text:
        history.append(text)
        start_time = time.time()
        response = chatbot.response(history)
        end_time = time.time()
        print("预测时间：" + str(end_time - start_time))
        history.append(response)

        # Keep only the last 4 exchanges (8 utterances) of context.
        if len(history) > 8:
            history = history[-8:]
        print("机器：" + response)
        text = input("你：")


# Guarding the REPL keeps `import`ing this module side-effect free
# (previously the input() loop ran at import time).
if __name__ == "__main__":
    main()


