import torch
from transformers import BertForMaskedLM, BertTokenizer
import numpy as np
import pandas as pd
from transformers import BertTokenizer, BertForMaskedLM
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt

class CoherenceEvaluator:
    """Scores semantic coherence of (Chinese) text with a BERT masked-LM.

    Coherence is derived from a pseudo-perplexity: a random sample of token
    positions is masked one at a time, and the model's negative log-likelihood
    of the original token at each masked position is averaged.
    """

    def __init__(self, model_path=r"C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\lab1\model\bert-base-chinese"):
        """Load tokenizer and masked-LM head from *model_path* and move the
        model to GPU when available. The model is put in eval mode (no dropout).
        """
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        self.model = BertForMaskedLM.from_pretrained(model_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.eval()

    def calculate_perplexity(self, text, sample_ratio=0.3):
        """Pseudo-perplexity of *text* via a dynamic masking strategy.

        Args:
            text: raw input string.
            sample_ratio: fraction of maskable token positions to evaluate
                (at least one position is always sampled).

        Returns:
            float perplexity; ``float('inf')`` for texts too short (or fully
            truncated) to score.
        """
        tokens = self.tokenizer.tokenize(text)
        if len(tokens) < 3:  # filter out texts too short to score
            return float('inf')

        # Candidate mask positions: skip special symbols, and skip positions
        # that truncation would cut off. With max_length=512, [CLS] occupies
        # slot 0 and [SEP] slot 511, so only original indices < 510 survive
        # (token i lands at encoded position i + 1).
        valid_indices = [
            i for i, t in enumerate(tokens)
            if t not in ('[CLS]', '[SEP]') and i < 510
        ]
        if not valid_indices:
            return float('inf')

        sample_size = max(1, int(len(valid_indices) * sample_ratio))
        masked_indices = np.random.choice(valid_indices, sample_size, replace=False)

        total_nll = 0.0
        with torch.no_grad():
            for idx in masked_indices:
                masked_tokens = tokens.copy()
                original_token = masked_tokens[idx]
                masked_tokens[idx] = '[MASK]'

                inputs = self.tokenizer.encode_plus(
                    masked_tokens,
                    return_tensors='pt',
                    is_split_into_words=True,
                    max_length=512,
                    truncation=True
                ).to(self.device)

                outputs = self.model(**inputs)
                # +1 because encode_plus prepends [CLS]. log_softmax is used
                # instead of softmax + log for numerical stability (avoids
                # log(0) = -inf on underflow).
                log_probs = torch.nn.functional.log_softmax(
                    outputs.logits[0, idx + 1], dim=-1
                )
                token_id = self.tokenizer.convert_tokens_to_ids(original_token)
                total_nll -= log_probs[token_id].item()

        avg_nll = total_nll / len(masked_indices)
        return float(np.exp(avg_nll))

    def coherence_score(self, text, max_ppl=150):
        """Map perplexity to a coherence score in [0, 1].

        Scores decay linearly from 1 (perplexity 0) to 0 (perplexity at or
        above *max_ppl*); rounded to 4 decimals.
        """
        ppl = self.calculate_perplexity(text)
        score = 1 - (ppl / max_ppl) if ppl < max_ppl else 0
        return round(score, 4)

def main():
    """Demo entry point: score a single sample string for coherence."""
    evaluator = CoherenceEvaluator()
    sample = "##￥%……&*（）"
    print(f"Coherence Score: {evaluator.coherence_score(sample)}")  # e.g. 0.7823

    # Disabled batch pipeline: read an Excel corpus, compute the average
    # coherence score per topic, and write the aggregates back to Excel.
    # src = r"C:\Users\23248\PycharmProjects\stance\DataCrawler\data\tot.xlsx"
    # df = pd.read_excel(src, engine='openpyxl')
    # df['content'] = df['content'].astype(str).str.strip()  # drop surrounding whitespace
    #
    # rows = []
    # for topic, group in tqdm(df.groupby('topic'), desc="处理进度"):
    #     scores = []
    #     for content in group['content']:
    #         if len(content) < 2:  # skip degenerate short texts
    #             continue
    #         s = evaluator.coherence_score(content)
    #         if s is not None:  # drop anomalies
    #             scores.append(s)
    #     if scores:
    #         rows.append({
    #             'topic': topic,
    #             '平均困惑度': round(sum(scores) / len(scores), 2),
    #             '有效样本数': len(scores),
    #             '总样本数': len(group),
    #         })
    #
    # out = pd.DataFrame(rows)
    # dst = r"C:\Users\23248\PycharmProjects\stance\DataCrawler\data\topic_perplexity_results_corrected.xlsx"
    # out.to_excel(dst, index=False)
    # print("处理完成！典型结果示例：")
    # print(out.head())

if __name__ == '__main__':
    main()