import torch
from net import Model
from transformers import BertTokenizer, AdamW

# Pick GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Class names indexed by model output: 0 = negative review, 1 = positive review.
names = ["负向评价", "正向评价"]
print(DEVICE)
# Instantiate the classifier (defined in net.py) and move it to the target device.
model = Model().to(DEVICE)

# Tokenizer loaded from a local bert-base-chinese cache directory.
token = BertTokenizer.from_pretrained(r"D:\AI\HuggingFace\my-model-cache\bert-base-chinese")
def collate_fn(batch):
    """Tokenize one text (or a list of texts) into padded BERT input tensors.

    Args:
        batch: a single string, or a list of strings. The original version
            wrapped the argument in a list unconditionally, which would
            double-nest a real batch and make the tokenizer treat it as
            text *pairs*; both forms are now handled correctly.

    Returns:
        Tuple ``(input_ids, attention_mask, token_type_ids)`` of tensors on
        DEVICE, each truncated/padded to a fixed length of 500 tokens.
    """
    # Normalize to a list of strings for batch_encode_plus.
    texts = [batch] if isinstance(batch, str) else list(batch)

    # Encode: truncate/pad every sequence to the same fixed length.
    data = token.batch_encode_plus(batch_text_or_text_pairs=texts,
                                   truncation=True,
                                   padding='max_length',
                                   max_length=500,
                                   return_tensors='pt',
                                   return_length=True
                                   )
    # input_ids: token ids after encoding.
    # attention_mask: 1 for real tokens, 0 for padding positions.
    input_ids = data['input_ids'].to(DEVICE)
    attention_mask = data['attention_mask'].to(DEVICE)
    token_type_ids = data['token_type_ids'].to(DEVICE)

    return input_ids, attention_mask, token_type_ids

def test():
    """Interactive sentiment-classification loop.

    Loads the trained weights, then repeatedly reads a review from stdin,
    encodes it, and prints the predicted class name until 'q' is entered.
    """
    # map_location ensures weights saved on a GPU machine still load on CPU.
    model.load_state_dict(torch.load("params/99bert.pt", map_location=DEVICE))
    model.eval()  # evaluation mode: disable dropout etc.
    while True:
        text = input("请输入评论(输入'q'退出)：")
        # Strip whitespace so "q " / "q\n" also quits.
        if text.strip() == 'q':
            break
        input_ids, attention_mask, token_type_ids = collate_fn(text)  # encode
        with torch.no_grad():  # inference only — skip gradient bookkeeping
            out = model(input_ids=input_ids, attention_mask=attention_mask,
                        token_type_ids=token_type_ids)
            # .item() converts the 1-element argmax tensor to a plain int;
            # indexing a Python list with a tensor is fragile across versions.
            pred = out.argmax(dim=1).item()
            print("模型判定", names[pred], "\n")

# Run the interactive loop only when executed as a script, not on import.
if __name__ == "__main__":
    test()