"""
模型的评估
"""
import torch
import numpy as np

import config
from chatbot.seq2seq import Seq2seq
from lib.cut_sequence import cut
from chatbot.word_sequence import WordSequence


# 1. Load the trained seq2seq model onto the configured device.
# NOTE(review): assumes config.chatbot_save_model_path points to a state_dict
# saved from a compatible Seq2seq instance — confirm against training script.
model = Seq2seq().to(config.device)
model.load_state_dict(torch.load(config.chatbot_save_model_path))


def evaluate(by_word=False):
    """Interactive evaluation loop: read a sentence from stdin, run the
    seq2seq model with greedy decoding, and print the decoded answer.

    :param by_word: if True, cut the input into characters instead of words.
    """
    while True:
        # 2. Read and tokenize the user's sentence.
        raw = input("请输入:")
        tokens = cut(raw, by_word=by_word)
        # Encode to ids, padded/truncated to max_length=20 -> shape [1, 20].
        input_data = torch.LongTensor(
            [config.chatbot_ws_input_by_word_model.transform(sentence=tokens,
                                                             max_length=20)]).to(config.device)
        # Bug fix: the length must be measured on the token sequence, not on
        # the batched tensor — len() of a [1, max_len] tensor is always 1.
        # Clamp to 20 because the tensor was truncated to that width.
        input_length = torch.LongTensor([min(len(tokens), 20)]).to(config.device)

        # 3. Predict. model.evaluate returns per-step ids like [[1], [2], ...];
        # flatten to a plain id list [1, 2, ...] before decoding.
        pre = np.array(model.evaluate(input_data, input_length)).flatten()
        output = "".join(
            config.chatbot_ws_target_by_word_model.reverse_transform(list(pre)))
        print("answer:", output)


def evaluate_by_beamsearch(by_word=False, user_input="今天天气很好"):
    """Evaluate the model on one sentence using beam-search decoding and
    print the raw prediction ids and the decoded tokens.

    :param by_word: if True, cut the input into characters instead of words.
    :param user_input: sentence to decode. Generalized from the previous
        hard-coded constant; the default preserves the old behavior.
    """
    tokens = cut(user_input, by_word=by_word)
    # Sequence length is taken from the token list before padding/truncation.
    input_length = torch.LongTensor([len(tokens)]).to(config.device)
    # sentence: [1, max_len] after id-encoding and padding/truncation.
    sentence = torch.LongTensor(
        [config.chatbot_ws_input_by_word_model.transform(
            tokens, max_length=config.chatbot_max_len)]).to(config.device)
    with torch.no_grad():
        # sentence: [1, max_len], input_length: [1]
        # NOTE(review): "evaluate_by_beamserch" (sic) is the model's actual
        # method name — keep the call as-is until the model API is renamed.
        pred = model.evaluate_by_beamserch(sentence, input_length)
        print(pred)
        result = config.chatbot_ws_target_by_word_model.reverse_transform(pred)
        print(result)




