from transformers import AutoTokenizer # 导入自动分词器
from transformers import AutoModel # 导入自动模型
from transformers import AutoModelForSequenceClassification # 导入用于序列分类的自动模型
import torch # 导入PyTorch库

# Load the pretrained checkpoint: a DistilBERT fine-tuned on SST-2 (binary
# sentiment). `AutoModel` gives the bare encoder (hidden states only);
# `AutoModelForSequenceClassification` adds the classification head on top.
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModel.from_pretrained(checkpoint)
model2 = AutoModelForSequenceClassification.from_pretrained(checkpoint)

# Raw input sentences to classify.
raw_inputs=[
    "I've been waiting for a this course my whole life.",
    "I hate this so much!",
]

# Tokenize and encode the batch:
# padding=True pads shorter sequences to the batch max length,
# truncation=True cuts sequences that exceed the model max length,
# return_tensors="pt" returns PyTorch tensors.
inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors="pt")
print(inputs)

# Output:
# {'input_ids': tensor([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 2023, 2607, 2026, 2878,
#          2166, 1012,  102],
#         [ 101, 1045, 5223, 2023, 2061, 2172,  999,  102,    0,    0,    0,    0,
#             0,    0,    0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
#         [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]])}

# Decode the first sentence's token ids back to text to see the special tokens.
print(tokenizer.decode([ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 2023, 2607, 2026, 2878, 2166, 1012,  102]))


# Output: [CLS] i've been waiting for a this course my whole life. [SEP]
# [CLS] is the classification token, [SEP] marks the end of the sequence.



# Inspect the architecture of the bare encoder.
print(model)

# Output:
# DistilBertModel(
#   (embeddings): Embeddings(
#     (word_embeddings): Embedding(30522, 768, padding_idx=0)
#     (position_embeddings): Embedding(512, 768)
#     (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
#     (dropout): Dropout(p=0.1, inplace=False)
#   )
#   (transformer): Transformer(
#     (layer): ModuleList(
#       (0-5): 6 x TransformerBlock(
#         (attention): MultiHeadSelfAttention(
#           (dropout): Dropout(p=0.1, inplace=False)
#           (q_lin): Linear(in_features=768, out_features=768, bias=True)
#           (k_lin): Linear(in_features=768, out_features=768, bias=True)
#           (v_lin): Linear(in_features=768, out_features=768, bias=True)
#           (out_lin): Linear(in_features=768, out_features=768, bias=True)
#         )
#         (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
#         (ffn): FFN(
#           (dropout): Dropout(p=0.1, inplace=False)
#           (lin1): Linear(in_features=768, out_features=3072, bias=True)
#           (lin2): Linear(in_features=3072, out_features=768, bias=True)
#           (activation): GELUActivation()
#         )
#         (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
#       )
#     )
#   )
# )

# Run the bare encoder: it returns contextual hidden states, not class scores.
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
# Output:
# torch.Size([2, 15, 768]) -> 2 sentences, 15 tokens each, 768-dim vectors


# Run the model with the classification head: it returns raw logits per class.
outputs2 = model2(**inputs)
print(outputs2.logits.shape)
# Output:
# torch.Size([2, 2]) -> 2 samples, 2 classes


# Convert logits to probabilities with softmax over the class dimension.
predictions = torch.nn.functional.softmax(outputs2.logits, dim=-1)
print(predictions)
# Output:
# tensor([[1.5446e-02, 9.8455e-01],
#         [9.9946e-01, 5.4418e-04]], grad_fn=<SoftmaxBackward0>)


# Print the label mapping from the model config (class index -> label name).
print(model2.config.id2label)
# Output:
# {0: 'NEGATIVE', 1: 'POSITIVE'}