import numpy as np
from bert_score import score as bert_score
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from nltk.translate.meteor_score import meteor_score
from nltk.tokenize import word_tokenize
import warnings
from 测试集评估.主要.data_v_1_0 import load_answer,load_reference_only
from tqdm import tqdm


# Silence library warnings (e.g. BLEU zero-n-gram warnings) for cleaner console output.
warnings.filterwarnings('ignore')
import torch
# Report the installed torch build and whether CUDA is usable before picking a device.
print(torch.__version__)
print(torch.cuda.is_available())
# Module-level device string consumed by ChatbotEvaluator.evaluate (passed to bert_score).
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

class ChatbotEvaluator:
    """Score chatbot answers against references with BLEU, BERTScore and METEOR."""

    def __init__(self):
        # Smoothing function for sentence-level BLEU; method1 prevents
        # hard-zero scores when a higher-order n-gram has no overlap.
        self.smoother = SmoothingFunction().method1

    def evaluate(self, references, predictions):
        """
        Evaluate each (reference, prediction) pair and aggregate the scores.

        :param references: list of reference answer strings
        :param predictions: list of generated answer strings
        :return: dict with per-pair lists ('bleu_scores', 'bert_scores',
                 'meteor_scores'), their means ('avg_bleu', 'avg_bert',
                 'avg_meteor'), and a weighted 'composite_score'
        :raises ValueError: if the two lists differ in length
        """
        # Real exception instead of `assert`: asserts vanish under `python -O`.
        if len(references) != len(predictions):
            raise ValueError("参考和预测数量不匹配")

        results = {
            'bleu_scores': [],
            'bert_scores': [],
            'meteor_scores': []
        }

        # BERTScore (semantic similarity): batch all pairs in one call.
        # The previous per-pair calls re-ran the whole scoring pipeline on
        # every iteration; batching returns the same per-sentence F1 values.
        # NOTE(review): lang='en' while the sample data is Chinese — confirm
        # whether lang='zh' (or a multilingual model) is intended.
        if references:
            _, _, F1 = bert_score(predictions, references, lang='en',
                                  verbose=False, device=device)
            results['bert_scores'] = [f.item() for f in F1]

        for ref, pred in tqdm(zip(references, predictions), total=len(references)):
            ref_tokens = word_tokenize(ref)
            pred_tokens = word_tokenize(pred)

            # 1. BLEU (surface n-gram overlap). The smoother was constructed
            # in __init__ but its use was commented out, which yields ~0 BLEU
            # (plus warnings) for short sentences — re-enabled here.
            bleu = sentence_bleu(
                [ref_tokens],
                pred_tokens,
                smoothing_function=self.smoother
            )
            results['bleu_scores'].append(bleu)

            # 2. METEOR (closer to human judgement via stem/synonym matching).
            meteor = meteor_score([ref_tokens], pred_tokens)
            results['meteor_scores'].append(meteor)

        # Dataset-level means of each metric.
        results['avg_bleu'] = np.mean(results['bleu_scores'])
        results['avg_bert'] = np.mean(results['bert_scores'])
        results['avg_meteor'] = np.mean(results['meteor_scores'])

        # Weighted composite: semantic metrics weighted above surface BLEU.
        results['composite_score'] = (
                0.2 * results['avg_bleu'] +
                0.4 * results['avg_bert'] +
                0.4 * results['avg_meteor']
        )

        return results


# Usage example
if __name__ == "__main__":
    # Example data: one (reference, prediction) pair of Chinese HR-assistant
    # replies. (The unused `sheet` variable from the original was removed.)
    references = ["您好，同方股份的具体考勤时间可能因部门或岗位不同有所差异。建议您查看公司内部通知、员工手册或直接咨询HR部门获取最准确的信息哦。若有其他能帮到您的地方，请随时告诉我~"]

    predictions = ["您好，同方股份的签到规定可能不同部门之间会不太一样。建议您查看公司内部通知、员工手册或直接咨询HR部门信息。如果有其他能帮到您的地方，随时告诉我~"]

    evaluator = ChatbotEvaluator()
    scores = evaluator.evaluate(references, predictions)

    # Aggregate results.
    print("评估结果:")
    print(f"BLEU平均分: {scores['avg_bleu']:.4f}")
    print(f"BERTScore平均分: {scores['avg_bert']:.4f}")
    print(f"METEOR平均分: {scores['avg_meteor']:.4f}")
    print(f"综合得分: {scores['composite_score']:.4f}")

    # Per-sample breakdown.
    print("\n详细分数:")
    for i, (ref, pred) in enumerate(zip(references, predictions)):
        print(f"\n样例 {i + 1}:")
        print(f"参考: {ref}")
        print(f"生成: {pred}")
        print(f"  BLEU: {scores['bleu_scores'][i]:.4f}")
        print(f"  BERT: {scores['bert_scores'][i]:.4f}")
        print(f"  METEOR: {scores['meteor_scores'][i]:.4f}")