from transformers import BertTokenizer, BertForSequenceClassification
import torch

# Path to the directory containing the saved model and tokenizer.
model_path = 'D:\\work\\code\\clark\\gitee\\py_llm\\bert\\train\\eval'  # replace with the actual path where your model was saved

# Load the tokenizer from the saved directory.
tokenizer = BertTokenizer.from_pretrained(model_path)

# Load the fine-tuned sequence-classification model from the same directory.
model = BertForSequenceClassification.from_pretrained(model_path)
model.eval()  # put the model in evaluation mode (disables dropout, etc.)

def preprocess_input(text, max_length=128):
    """Tokenize *text* into model-ready PyTorch tensors.

    Args:
        text: Input string (or list of strings) to tokenize.
        max_length: Maximum token length; longer inputs are truncated.
            Defaults to 128, preserving the original hard-coded behavior.

    Returns:
        A ``BatchEncoding`` (dict-like) of tensors — ``input_ids``,
        ``attention_mask``, etc. — suitable for unpacking into the model
        call via ``model(**inputs)``.
    """
    return tokenizer(
        text,
        return_tensors='pt',
        truncation=True,
        padding=True,
        max_length=max_length,
    )

def predict_intent(text):
    """Classify *text* and return ``(predicted_class_id, probabilities)``.

    ``predicted_class_id`` is the argmax over the label dimension;
    ``probabilities`` is the softmax of the logits, shape
    ``(batch, num_labels)`` — ``(1, num_labels)`` for a single string.
    """
    encoded = preprocess_input(text)

    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        model_output = model(**encoded)

    class_probs = model_output.logits.softmax(dim=-1)
    best_class = class_probs.argmax(dim=-1).item()

    return best_class, class_probs

# Mapping from class index to a human-readable intent label.
label_mapping = {0: "查询订单状态", 1: "了解退货政策", 2: "寻求帮助", 3: "了解服务时间"}

# The text to classify.
input_text = "查下我的订单状态"

# Run inference on the input text.
predicted_class_id, probabilities = predict_intent(input_text)

# Resolve the predicted label and the probability assigned to it.
predicted_label = label_mapping[predicted_class_id]
confidence = probabilities[0, predicted_class_id].item()

print(f"Predicted Intent: {predicted_label}")
print(f"Confidence: {confidence:.4f}")