# Model inference interface (adapted for 8-class classification)
 
import torch
from nete import Model
from transformers import BertTokenizer
 
# ===== Device setup =====
# Prefer GPU when available; all tensors and the model are moved here.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ===== Load vocabulary and tokenizer =====
# NOTE(review): machine-specific local snapshot of bert-base-chinese —
# confirm this path exists on the deployment machine.
token = BertTokenizer.from_pretrained(
    r"D:\...\model\bert-base-chinese\models--bert-base-chinese\snapshots\c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f"
)

# ===== Build model structure (dropout / unfreeze args should match training) =====
model = Model(dropout_rate=0.3, unfreeze_last_n_layers=2).to(DEVICE)

# ===== Class label names; index must match the model's output class index =====
names = ["非常差", "差", "一般", "中性", "还行", "满意", "很好", "非常好"]  # 8 subjective rating levels
 
# ===== Data preprocessing =====
def collate_fn(data):
    """Tokenize one raw text string into BERT input tensors.

    Args:
        data: Raw text string to classify.

    Returns:
        Tuple of (input_ids, attention_mask, token_type_ids), each a
        tensor of shape (1, 128), ready to feed to the model.
    """
    # batch_encode_plus expects a list of texts, so wrap the single string.
    # Fixes from review: do not rebind the `data` parameter with the
    # encoding dict, and drop the unused `return_length=True` flag.
    encoded = token.batch_encode_plus(
        batch_text_or_text_pairs=[data],
        truncation=True,
        max_length=128,  # keep consistent with the max_length used at training time
        padding="max_length",
        return_tensors="pt",
    )
    return encoded["input_ids"], encoded["attention_mask"], encoded["token_type_ids"]
 
# ===== Model prediction interface =====
def test():
    """Interactive loop: read text from stdin, print the predicted label.

    Loads the trained weights once, then classifies each entered line
    until the user types 'q'.
    """
    # Restore the trained weights before running inference.
    # NOTE(review): torch.load unpickles the checkpoint — only load
    # files from a trusted source.
    state = torch.load(
        r"D:\...\person_test3\params\best_bert.pth",
        map_location=DEVICE
    )
    model.load_state_dict(state)
    model.eval()

    while True:
        text = input("请输入测试数据（输入 'q' 退出）：")
        if text.lower() == 'q':
            print("测试结束")
            break

        # Tokenize and move every tensor onto the target device.
        tensors = collate_fn(text)
        input_ids, attention_mask, token_type_ids = (t.to(DEVICE) for t in tensors)

        # Inference only — no gradients needed.
        with torch.no_grad():
            logits = model(input_ids, attention_mask, token_type_ids)
        predicted = logits.argmax(dim=1).item()
        print("模型判定：", names[predicted], "\n")
 
# ===== Entry point =====
if __name__ == '__main__':
    test()