from transformers import BertForSequenceClassification, BertTokenizer
import torch

# Load the fine-tuned BERT classifier and its tokenizer from a local directory.
# Raw string (r"...") because the Windows path contains backslashes.
model_path = r"C:\Users\86182\Desktop\sqsx_lqbz\model_download"
tokenizer_path = model_path  # tokenizer files live alongside the model weights
model = BertForSequenceClassification.from_pretrained(model_path)
tokenizer = BertTokenizer.from_pretrained(tokenizer_path)


# Data preprocessing
def preprocess(text, tokenizer, max_length=512):
    """Tokenize *text* into fixed-length PyTorch tensors for the model.

    Args:
        text: Raw input string to encode.
        tokenizer: HuggingFace tokenizer (or any callable with the same
            keyword interface).
        max_length: Pad/truncate target length in tokens (default 512,
            BERT's maximum).

    Returns:
        The tokenizer's encoding (``input_ids``, ``attention_mask``, ...)
        as ``'pt'`` tensors.
    """
    tokenize_kwargs = dict(
        truncation=True,
        padding='max_length',
        max_length=max_length,
        return_tensors='pt',
    )
    return tokenizer(text, **tokenize_kwargs)


# Prediction
def predict(text, model, tokenizer):
    """Return the predicted class index for *text*.

    Args:
        text: Raw input string to classify.
        model: Sequence-classification model whose output exposes ``.logits``.
        tokenizer: Tokenizer forwarded to :func:`preprocess`.

    Returns:
        int: Index of the highest-scoring class.
    """
    encoding = preprocess(text, tokenizer)

    # Run on the same device as the model's weights, so a model that was
    # moved to GPU does not crash on the CPU tensors the tokenizer returns.
    try:
        device = next(model.parameters()).device
    except StopIteration:  # parameter-less model (e.g. a test stub): stay on CPU
        device = torch.device('cpu')

    input_ids = encoding['input_ids'].to(device)
    attention_mask = encoding['attention_mask'].to(device)

    # Ensure inference mode (disables dropout) in case a caller left the
    # model in training mode; no_grad avoids building the autograd graph.
    model.eval()
    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)

    logits = outputs.logits
    # argmax over the class dimension; .item() assumes a single example,
    # matching the single-text interface of this function.
    predictions = torch.argmax(logits, dim=-1)
    return predictions.item()


# Example usage: classify one answer text and print the predicted class id.
text = "学生的主观题答案文本"  # sample subjective-question answer (Chinese)
prediction = predict(text, model, tokenizer)
print(f"预测类别: {prediction}")