from transformers import AutoTokenizer  # auto-loads the matching tokenizer for a checkpoint
from transformers import AutoModel  # generic model auto-loader (not used below)
from transformers import AutoModelForSequenceClassification  # auto-loads a sequence-classification model
import torch  # PyTorch

# Load the pretrained model and tokenizer.
# Checkpoint: DistilBERT fine-tuned on SST-2, a binary sentiment-classification
# head — its logits below are [negative, positive] scores.
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)


###### Effect of padding
# Two encoded inputs of unequal length. To batch them together, the shorter
# one must be padded to the common length with the tokenizer's pad token id.
sequence1_ids = [[200, 200, 200]]
sequence2_ids = [[200, 200]]

batched_ids = [
    [200, 200, 200],
    [200, 200, tokenizer.pad_token_id],
]

# Run inference on each input and print the classification logits.
for input_ids in (sequence1_ids, sequence2_ids, batched_ids):
    print(model(torch.tensor(input_ids)).logits)

# Observed output — note the second row of the batched result does NOT match
# sequence2's own logits, because without an attention mask the model still
# attends to the pad token:
# tensor([[ 1.5694, -1.3895]], grad_fn=<AddmmBackward0>)
# tensor([[ 0.5803, -0.4125]], grad_fn=<AddmmBackward0>)
# (transformers also warns: pass an `attention_mask` when input_ids are padded)
# tensor([[ 1.5694, -1.3895],
#         [ 1.3374, -1.2163]], grad_fn=<AddmmBackward0>)



################### Effect of attention_mask
# BUG FIX: the original mask was all ones ([1,1,1] for both rows), which is
# equivalent to passing no mask at all — it reproduced the wrong batched
# logits and defeated the point of the demo. A 0 in the mask tells the model
# to ignore that position; the pad token added to the second sequence must be
# masked out.
attention_mask = [
    [1, 1, 1],
    [1, 1, 0],  # last position is tokenizer.pad_token_id — do not attend to it
]

outputs = model(torch.tensor(batched_ids), attention_mask=torch.tensor(attention_mask))
print(outputs.logits)
# Output — with the pad token masked, the second row now matches the logits
# obtained when sequence2 is run on its own:
# tensor([[ 1.5694, -1.3895],
#         [ 0.5803, -0.4125]], grad_fn=<AddmmBackward0>)