from transformers import BertTokenizer, BertForSequenceClassification
import torch

# Load the pretrained tokenizer and the fine-tuned classification head (3 labels).
tokenizer = BertTokenizer.from_pretrained('model/chinese-macbert-base')
model = BertForSequenceClassification.from_pretrained('model/chinese-macbert-base', num_labels=3)
# map_location='cpu' makes loading robust on machines without the GPU the
# checkpoint was saved on; the model runs on CPU below either way.
model.load_state_dict(torch.load('train/trained_model.pt', map_location='cpu'))
# Switch to eval mode so dropout is disabled and predictions are deterministic.
model.eval()

# Prepare the inference inputs.
print("推理开始回答三个问题（回答该问题所属分类，答案0 楼市类 1 股市类 2 教育类）")

# Three sample headlines to classify; per the banner above the label meanings
# are: 0 = real estate (楼市), 1 = stock market (股市), 2 = education (教育).
text1 = '瑞士留学生活：学校打工挣兑换券故事'
text2 = '盘前：苹果财报推动美股指期货走高'
text3 = '湄洲湾职业技术学院：计划招生2000名'


def read_txt(text):
    """Classify a single headline and print the prediction details.

    Args:
        text: The headline string to classify.

    Returns:
        The predicted label index (int); per the label scheme printed at
        startup: 0 = real estate, 1 = stock market, 2 = education.
    """
    print("问题: ", text)
    encoded = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=128,
        padding=True,
        truncation=True,
        return_tensors='pt'
    )

    input_ids = encoded['input_ids']
    attention_mask = encoded['attention_mask']

    # Inference only: disable autograd so no computation graph is built
    # (saves memory and time).
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits

    # Index of the highest-scoring class.
    predicted_label = torch.argmax(logits, dim=1).item()

    # Full probability distribution over all labels (softmax of the logits).
    probabilities = torch.softmax(logits, dim=1).tolist()[0]

    print(f"预测标签: {predicted_label}")
    print("标签概率分布:")
    for label, prob in enumerate(probabilities):
        print(f"标签 {label}: {prob}")

    # Return the prediction so callers can use it programmatically
    # (backward compatible: existing callers ignore the return value).
    return predicted_label


# Classify each sample headline (trailing semicolons removed — not Pythonic).
for sample in (text1, text2, text3):
    read_txt(sample)
