# from transformers import BertTokenizer, BertForMaskedLM
# import torch
#
# # 指定本地模型路径（注意路径格式）
# model_path = r"C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\lab1\model\bert-base-chinese"
#
# # 加载本地模型和分词器
# tokenizer = BertTokenizer.from_pretrained(model_path)
# model = BertForMaskedLM.from_pretrained(model_path)
#
# def main():
#     def calculate_perplexity(text):
#         inputs = tokenizer(text,
#                           return_tensors="pt",
#                           truncation=True,
#                           max_length=512)
#         with torch.no_grad():
#             outputs = model(**inputs, labels=inputs["input_ids"])
#         loss = outputs.loss
#         return torch.exp(loss).item()  # 困惑度
#
#     # 示例：计算评论得分
#     content = "记者在现场进行了详细报道"
#     perplexity = calculate_perplexity(content)
#     score = 1 / perplexity  # 将困惑度转换为正向得分
#     print(f"语义连贯度得分：{score:.4f}")
#
# if __name__ == '__main__':
#     main() # 得分高表示语义更连贯

import pandas as pd
from transformers import BertTokenizer, BertForMaskedLM
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt

# Configure matplotlib to render Chinese glyphs (needed if results are plotted)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Path to the local bert-base-chinese checkpoint (avoids downloading from the hub)
model_path = r"C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\lab1\model\bert-base-chinese"

# Load the tokenizer and masked-LM model from the local checkpoint
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForMaskedLM.from_pretrained(model_path)
model.eval()  # inference mode: disables dropout so scores are deterministic

# Move the model to GPU when one is available for faster inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)


def calculate_perplexity_simple(text):
    """Return a simple perplexity score for *text* under the masked LM.

    The unmasked input ids are fed back in as labels, so the score is
    exp(cross-entropy) over the whole sequence (a rough pseudo-perplexity,
    not a token-by-token masked evaluation). Returns None when tokenization
    or inference fails for any reason.
    """
    try:
        # Tokenize with truncation at BERT's 512-token limit.
        encoded = tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            max_length=512,
            padding=True,
        )
        encoded = {name: tensor.to(device) for name, tensor in encoded.items()}

        # Inference only: no gradients needed for scoring.
        with torch.no_grad():
            result = model(**encoded, labels=encoded["input_ids"])

        # Perplexity is the exponential of the mean cross-entropy loss.
        return torch.exp(result.loss).item()

    except Exception as e:
        # Best-effort: log a truncated preview of the text and error,
        # then signal failure with None so the caller can skip this item.
        print(f"计算失败: {text[:20]}... | 错误: {str(e)[:50]}")
        return None

def main():
    """Score each comment's perplexity and save the per-topic averages to Excel."""
    file_path = r"C:\Users\23248\PycharmProjects\stance\DataCrawler\data\tot.xlsx"
    df = pd.read_excel(file_path, engine='openpyxl')

    # Drop missing rows BEFORE casting to str: astype(str) turns NaN into the
    # literal string "nan", which passes the length filter below and would
    # silently pollute the per-topic perplexity averages.
    df = df.dropna(subset=['content'])
    df['content'] = df['content'].astype(str).str.strip()  # trim surrounding whitespace

    # Compute the average perplexity of valid comments within each topic.
    results = []
    for topic, group in tqdm(df.groupby('topic'), desc="处理进度"):
        ppl_values = []
        for content in group['content']:
            if len(content) < 2:  # skip degenerate / near-empty texts
                continue
            ppl = calculate_perplexity_simple(content)
            # Keep only plausible scores; extreme values are almost always
            # tokenization artifacts or failed computations, not real signal.
            if ppl is not None and 1 < ppl < 1e6:
                ppl_values.append(ppl)

        if ppl_values:
            avg_ppl = sum(ppl_values) / len(ppl_values)
            results.append({
                'topic': topic,
                '平均困惑度': round(avg_ppl, 2),
                '有效样本数': len(ppl_values),
                '总样本数': len(group)
            })

    # Persist the per-topic summary next to the input data.
    result_df = pd.DataFrame(results)
    output_path = r"C:\Users\23248\PycharmProjects\stance\DataCrawler\data\topic_perplexity_results_corrected.xlsx"
    result_df.to_excel(output_path, index=False)

    print("处理完成！典型结果示例：")
    print(result_df.head())

# Run the batch scoring only when executed as a script, not on import.
if __name__ == '__main__':
    main()