import torch
from 聊天模型 import LanguageModel
from 词嵌入 import embedding, tokenizer

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

model = LanguageModel()
# map_location=device: a checkpoint saved on GPU would otherwise fail to load
# on a CPU-only machine, defeating the device fallback above.
model.load_state_dict(torch.load("weights/best_model.pt", map_location=device))
model = model.to(device)

# Switch to inference mode (disables dropout, freezes norm statistics).
model.eval()

# Greedy autoregressive decoding loop.
def chat(src, max_len=50, sep_id=102):
    """Generate a reply for *src* by greedy (argmax) decoding.

    The growing target string is re-tokenized and fed back into the model
    one step at a time until the end-of-sequence token or *max_len*.

    Args:
        src: raw input text (a single utterance).
        max_len: maximum number of decoding steps / generated tokens.
        sep_id: token id that terminates generation (102 is BERT's [SEP];
            previously a hard-coded magic number).

    Returns:
        Tuple ``(ids, tokens)``: generated token ids and their string forms
        (the terminating token, if reached, is included).
    """
    # Tokenize the source once; it is constant across decoding steps.
    src, src_key_padding_mask = tokenizer(src)
    src = src.to(device)
    src_key_padding_mask = src_key_padding_mask.to(device)

    # Target sequence starts with the BOS marker and grows one token per step.
    tgt = '[CLS]'
    ids = []
    tokens = []
    for _ in range(max_len):
        # Re-tokenize the (growing) target string each step.
        _tgt, tgt_key_padding_mask = tokenizer(tgt)
        _tgt = _tgt.to(device)
        tgt_key_padding_mask = tgt_key_padding_mask.to(device)

        with torch.no_grad():
            y = model(src, _tgt, src_key_padding_mask, tgt_key_padding_mask)

        # Keep only the non-padding positions of the output.
        valid_y = y[tgt_key_padding_mask == 0]
        # Greedy choice: argmax over the vocabulary at each position.
        # (softmax is monotonic, so it does not change the argmax.)
        _ids = valid_y.softmax(-1).argmax(-1)
        last_ids = _ids[-1].item()
        last_token = embedding.tokenizer.convert_ids_to_tokens([last_ids])[0]
        ids.append(last_ids)
        tokens.append(last_token)
        # Stop once the end-of-sequence token is emitted.
        if last_ids == sep_id:
            break

        # NOTE(review): naive string concatenation — assumes the tokenizer
        # round-trips concatenated tokens (e.g. no '##' subword prefixes);
        # verify against the project tokenizer.
        tgt = tgt + last_token
    return ids, tokens

if __name__ == '__main__':
    # Single-turn smoke test of the chat pipeline.
    user_text = "你是谁"
    with torch.no_grad():
        reply_ids, reply_tokens = chat(user_text)

    print("human:", user_text)
    print("assistant:", "".join(reply_tokens))

