# -*- coding: utf-8 -*-            
# @Time : 2025/5/9 15:51
# @Author: ZZC
# @proj: BERT_NO1
# 利用训练的结果，进行测试，代码来自https://blog.csdn.net/m0_58700887/article/details/141865364
import pandas as pd
import time
import torch
import joblib
from transformers import BertTokenizer, BertForSequenceClassification
import torch.nn.functional as F

# Step 1: load the data to run inference on
file_path = '../data/ChnSentiCorp_htl_test.csv'  # path to the inference data
df = pd.read_csv(file_path, encoding='utf-8')

# Step 2: load the fine-tuned model and its tokenizer
best_model_path = '../model_save'
model = BertForSequenceClassification.from_pretrained(best_model_path)
tokenizer = BertTokenizer.from_pretrained(best_model_path)
# Inference, not training: disable dropout / put batch-norm-style layers in eval mode
# so predictions are deterministic.
model.eval()

# Step 3: load the label encoder that was saved during training
label_encoder = joblib.load('../data/encoder.joblib')

predictions = []  # predicted class ids
confidence_scores = []  # max softmax probability; > 0.9 usually indicates a reliable prediction

# Step 4: iterate over the rows and classify each review
for _, row in df.iterrows():
    content = row['review']  # feature column (the text sample to classify)
    inputs = tokenizer(content, return_tensors="pt", padding=True, truncation=True, max_length=256)
    # no_grad: skip building the autograd graph -- saves memory and time at inference
    with torch.no_grad():
        outputs = model(**inputs)
    probs = F.softmax(outputs.logits, dim=1)
    # torch.max over the class dimension yields both the confidence value
    # and the predicted class index in a single call.
    confidence, pred = torch.max(probs, dim=1)

    predictions.append(pred.item())
    print('predict:',pred.item())
    print('real:',row['label'])
    print("-------------------")
    confidence_scores.append(confidence.item())

# Step 5: decode the predicted ids back into the original class labels
decoded_categories = label_encoder.inverse_transform(predictions)

# Step 6: attach predictions and confidence scores to the DataFrame
df['pred'] = decoded_categories
df['confidence_score'] = confidence_scores

# Save the results locally
output_file_path = '../data/detect_pred.csv'  # path where the inference results are written
df.to_csv(output_file_path, index=False)

# NOTE(review): the script executes entirely at import time, so this guard is
# vestigial; kept so importing behavior is unchanged.
if __name__ == '__main__':
    pass