"""Minimal sentiment-analysis demo: classify one sentence with a fine-tuned SST-2 model."""
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the pretrained model and tokenizer.
# NOTE: the correct Hub id is the DistilBERT checkpoint; plain
# "bert-base-uncased-finetuned-sst-2-english" does not exist on the Hub.
MODEL_ID = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
model.eval()  # inference mode (disables dropout)

# Tokenize and encode the input text as PyTorch tensors.
inputs = tokenizer("I love Transformers!", return_tensors="pt")

# Run inference without gradient tracking; convert logits to probabilities
# so the reported score is a confidence in [0, 1], not a raw logit.
with torch.no_grad():
    outputs = model(**inputs)
probs = outputs.logits.softmax(dim=-1)
label_id = probs.argmax(dim=-1).item()
label = model.config.id2label[label_id]  # human-readable label, e.g. "POSITIVE"
score = probs[0, label_id].item()

# Print the result.
print(f"Label: {label}, Score: {score:.4f}")