import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the fine-tuned model and tokenizer from the local './model' checkpoint.
# NOTE(review): this runs at import time and will raise if './model' is
# missing or incomplete — confirm the checkpoint path before running.
model = BertForSequenceClassification.from_pretrained('./model')
tokenizer = BertTokenizer.from_pretrained('./model')

# Prediction function
def predict(sentence1, sentence2, model, tokenizer, device, label_map=None):
    """Classify the relation between a sentence pair and return its label.

    Args:
        sentence1: Premise sentence (str).
        sentence2: Hypothesis sentence (str).
        model: Sequence-classification model; moved to `device` in place.
        tokenizer: Tokenizer callable producing PyTorch model inputs.
        device: torch.device to run inference on.
        label_map: Optional mapping from class index to label string.
            Defaults to {0: 'entailment', 1: 'contradiction', 2: 'neutral'},
            preserving the original behavior.

    Returns:
        The predicted label string.
    """
    if label_map is None:
        # NOTE(review): this index->label order must match the fine-tuned
        # model's training config — confirm against the checkpoint.
        label_map = {0: 'entailment', 1: 'contradiction', 2: 'neutral'}
    inputs = tokenizer(sentence1, sentence2, return_tensors='pt',
                       truncation=True, padding=True, max_length=128)
    inputs = {key: val.to(device) for key, val in inputs.items()}  # move inputs to device
    model.to(device)  # ensure the model lives on the same device as the inputs
    model.eval()  # evaluation mode for inference
    with torch.no_grad():  # no gradients needed for inference
        outputs = model(**inputs)
        # argmax of logits == argmax of softmax(logits) (softmax is monotone),
        # so the explicit softmax of the original is skipped.
        label = torch.argmax(outputs.logits, dim=-1).item()
    return label_map[label]

# Pick the inference device: prefer CUDA when available, else fall back to CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
if use_cuda:
    print("Using GPU:", torch.cuda.get_device_name(0))
else:
    print("Using CPU")

# Example: run one premise/hypothesis pair through the classifier.
sentence1 = "A man is playing a guitar."
sentence2 = "A man is riding a bicycle."
result = predict(sentence1, sentence2, model, tokenizer, device)
print("Prediction: " + result)
