from torch.optim import AdamW

from demo5 import MyModel
import torch
from transformers import BertTokenizer

# Select GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Class labels indexed by the classifier's argmax output: 0 = negative review, 1 = positive review.
names = ["负向评价", "正向评价"]
# Sample review text for manual testing (paste into the prompt): 《离婚》也读完了。“离婚”翻译成更明白的话，应该叫幻灭。所有的对生活的希望都伴随着该离婚的人的不离婚而破灭了。
# Load the bert-base-chinese tokenizer from a local snapshot directory.
token = BertTokenizer.from_pretrained(r"D:\pythonWork\python_demo\model\google-bert\bert-base-chinese\models--google-bert--bert-base-chinese\snapshots\c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f")
# Build the classifier and move it to the target device; trained weights are loaded later in test().
model = MyModel().to(DEVICE)
print(model)
#自定义数据集的collate_fn 对数据进行编码处理
def collate_fn(data):
    """Encode one raw text sample into padded BERT input tensors.

    :param data: a single text sample; wrapped in a list so the tokenizer
        treats it as a batch of one.
    :return: tuple (input_ids, attention_mask, token_type_ids), each a
        torch tensor of shape (1, 350).
    """
    encoded = token.batch_encode_plus(
        batch_text_or_text_pairs=[data],  # batch of one sample
        truncation=True,                  # cut anything longer than max_length
        padding="max_length",             # pad every sequence to exactly 350 tokens
        max_length=350,
        return_tensors="pt",              # return PyTorch tensors
        return_length=True,
    )
    return encoded["input_ids"], encoded["attention_mask"], encoded["token_type_ids"]
def test():
    """Interactive sentiment demo: load trained weights, then classify
    user-entered sentences until the user types "exit".
    """
    # map_location keeps the load working on CPU-only machines even if the
    # checkpoint was saved from a CUDA device.
    model.load_state_dict(
        torch.load(r"D:\pythonWork\python_demo\params\lora_weights.pt",
                   map_location=DEVICE)
    )
    # Switch to evaluation mode (disables dropout etc.).
    model.eval()
    while True:
        text = input("请输入句子：")
        if text == "exit":
            print("退出测试")
            break
        input_ids, attention_mask, token_type_ids = collate_fn(text)
        input_ids = input_ids.to(DEVICE)
        attention_mask = attention_mask.to(DEVICE)
        token_type_ids = token_type_ids.to(DEVICE)
        with torch.no_grad():
            output = model(input_ids, attention_mask, token_type_ids)
            # .item() turns the 1-element argmax tensor into a plain int,
            # which is the safe way to index the `names` list.
            pred = output.argmax(dim=1).item()
            print("模型判定为：", names[pred], "\n")


if __name__ == "__main__":
    test()
