import torch
from transformers import AutoTokenizer,AutoModelForSequenceClassification

# Select the compute device: use GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def predict_sentence_pair(model_path, text1, text2):
    """Predict the similarity label for a sentence pair.

    Loads a fine-tuned sequence-classification checkpoint and tokenizer
    from ``model_path``, encodes the two sentences as a single pair, and
    returns the argmax class index over the model's logits.

    Args:
        model_path: Path to a fine-tuned sequence-classification model
            directory (model + tokenizer).
        text1: First sentence of the pair.
        text2: Second sentence of the pair.

    Returns:
        torch.Tensor: 1-D tensor of predicted label indices
        (e.g. 0 = not similar, 1 = similar for a binary classifier).
    """
    model = AutoModelForSequenceClassification.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model.to(device)
    model.eval()  # disable dropout for deterministic inference

    # Encode the two texts together as one pair.
    encoded_inputs = tokenizer(text1, text2, padding=True, truncation=True, return_tensors="pt")
    # Move ALL returned tensors to the device. BUG FIX: the original code
    # forwarded only input_ids and attention_mask, dropping token_type_ids,
    # which BERT-style models need to distinguish sentence A from sentence B
    # in pair classification.
    encoded_inputs = {name: tensor.to(device) for name, tensor in encoded_inputs.items()}

    with torch.no_grad():
        outputs = model(**encoded_inputs)
        preds = torch.argmax(outputs.logits, dim=1)

    return preds

if __name__ == '__main__':
    # Predict labels with the fine-tuned classifier (0 = not similar,
    # 1 = similar). Requires a model fine-tuned on a pair-classification
    # dataset.
    model_path = "/Users/apple/PycharmProjects/sentence-similarity/bert_model"

    # (sentence A, sentence B) pairs; the first two do not appear in the
    # training data, the last one does.
    sentence_pairs = [
        ("不想还花呗了，利息太高", "今晚吃什么"),
        ("不想还花呗了，利息太高", "不想还支付宝了"),
        ("怎样关闭借呗", "我不想用了,怎么关闭借呗的界面"),
    ]

    for text_a, text_b in sentence_pairs:
        predictions = predict_sentence_pair(model_path, text_a, text_b)
        print("Predicted labels:", predictions.tolist())
