import torch
from bert_pytorch.model.bert import BERT
from bert_pytorch.model import BERTLM
from bert_pytorch.dataset import WordVocab
import numpy as np

def process(sentence: str, word_vocab=None) -> tuple:
    """Convert a sentence into token ids plus a per-token label list.

    Bug fix: the original overwrote ``sentence`` with the hard-coded string
    ``"hello world"``, so the argument was silently ignored.

    :param str sentence: input sentence (whitespace-tokenized)
    :param word_vocab: vocabulary with ``stoi`` mapping and ``unk_index``;
        defaults to the module-level ``vocab`` loaded at script startup
    :return tuple: (token_ids, output_label) — output_label is all zeros,
        i.e. no position is masked for MLM during inference
    """
    if word_vocab is None:
        word_vocab = vocab  # module-level WordVocab instance
    token_ids = []
    output_label = []
    for token in sentence.split():
        # Unknown words fall back to the vocabulary's <unk> id.
        token_ids.append(word_vocab.stoi.get(token, word_vocab.unk_index))
        output_label.append(0)
    return token_ids, output_label
def infer(s1: str, s2: str) -> tuple:
    """Run next-sentence-prediction and masked-LM inference on a sentence pair.

    Bug fixes vs. the original: (1) ``s1``/``s2`` are actually used instead of
    hard-coded strings; (2) ``segment_label`` now gets the same batch dimension
    as ``bert_input`` so both model inputs are rank-2; (3) the function returns
    the model outputs instead of nothing, matching its ``-> tuple`` annotation.

    :param str s1: first sentence
    :param str s2: second sentence
    :return tuple: (next_sent_output, mask_lm_output); mask_lm_output is
        transposed to (batch, vocab, seq) — NOTE(review): presumably to match
        an NLLLoss-style layout, confirm against the training code
    """
    input_t1, _ = process(s1)
    input_t2, _ = process(s2)
    # Wrap each sentence with start-of-sentence / end-of-sentence markers.
    t1 = [vocab.sos_index] + input_t1 + [vocab.eos_index]
    t2 = [vocab.sos_index] + input_t2 + [vocab.eos_index]
    # Segment labels: 1 for first-sentence tokens, 2 for second-sentence
    # tokens; truncated to seq_len together with the token ids.
    segment_label = ([1] * len(t1) + [2] * len(t2))[:seq_len]
    bert_input = (t1 + t2)[:seq_len]
    # Add a leading batch dimension of 1 to BOTH inputs.
    bert_input = torch.from_numpy(np.array(bert_input)[None, :])
    segment_label = torch.from_numpy(np.array(segment_label)[None, :])
    next_sent_output, mask_lm_output = model(bert_input, segment_label)
    # (batch, seq, vocab) -> (batch, vocab, seq)
    mask_lm_output = mask_lm_output.transpose(1, 2)
    return next_sent_output, mask_lm_output

# Model hyper-parameters: vocab_size=2020, hidden=256, n_layers=8, attn_heads=8
# 2020 is len(vocab); see vocab.py for where that number comes from
seq_len = 20
# Load the trained weights (state dict checkpoint saved after epoch 0)
f = torch.load("./output/bert.model.ep0")
bert = BERT(2020, 256, 8, 8)
model = BERTLM(bert, 2020)
model.load_state_dict(f)
# Load the vocabulary so predicted words can be mapped to/from their ids
vocab = WordVocab.load_vocab(r".\data\vocab.small")
infer("hello","world")

