from bert_score import score as bert_score
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from nltk.translate.meteor_score import meteor_score
from nltk.tokenize import word_tokenize
import warnings
from data_v_1_0 import load_answer, load_reference_only
from tqdm import tqdm
from openpyxl import Workbook
import os

"""
    v0.2:
    增加 保存到excel
    
"""
# Silence all warnings for a clean console during evaluation.
warnings.filterwarnings('ignore')
# NOTE(review): torch is imported only after the warnings filter is installed --
# presumably to suppress warnings raised during torch's import; confirm before
# moving this up into the top-of-file import block.
import torch

# Report the runtime environment and select GPU when available.
print(torch.__version__)
print(torch.cuda.is_available())
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")


class ChatbotEvaluator:
    """Evaluate chatbot answers against reference answers.

    Computes BLEU, BERTScore and METEOR for every (reference, prediction)
    pair, streams the per-item scores into one Excel sheet and writes the
    averaged scores plus a weighted composite into a second sheet.
    """

    # Weights of the composite score (BLEU / BERTScore / METEOR).
    _WEIGHTS = {'bleu': 0.2, 'bert': 0.4, 'meteor': 0.4}

    def __init__(self, output_file: str = 'evaluation_results.xlsx'):
        """
        :param output_file: path of the .xlsx file the results are written to
        """
        # Smoothing keeps BLEU non-zero for short sentences that have no
        # higher-order n-gram overlap.
        self.smoother = SmoothingFunction().method1
        self.output_file = output_file

        # Workbook with two sheets: per-item detail and averaged summary.
        self.wb = Workbook()
        self.detailed_ws = self.wb.active
        self.detailed_ws.title = "Detailed Scores"
        self.detailed_ws.append(
            ['Index', 'Reference', 'Prediction', 'BLEU Score', 'BERT Score', 'METEOR Score'])

        self.summary_ws = self.wb.create_sheet(title="Summary Scores")
        self.summary_ws.append(['Metric', 'Average Score'])

        # Running totals used to compute the averages.
        self.total_bleu = 0.0
        self.total_bert = 0.0
        self.total_meteor = 0.0
        self.count = 0

    def save_to_excel(self):
        """Persist the workbook to ``self.output_file``."""
        self.wb.save(self.output_file)

    def _score_pair(self, ref: str, pred: str):
        """Return ``(bleu, bert_f1, meteor)`` for one reference/prediction pair."""
        ref_tokens = word_tokenize(ref)
        pred_tokens = word_tokenize(pred)

        # BLEU: n-gram / surface (entity word) overlap.
        bleu = sentence_bleu([ref_tokens], pred_tokens,
                             smoothing_function=self.smoother)

        # BERTScore F1: semantic similarity.  NOTE(review): scoring one pair
        # per call re-batches each item and is slow; consider passing all
        # pairs to bert_score in a single call if throughput matters.
        _, _, f1 = bert_score([pred], [ref], lang='en', verbose=False, device=device)
        bert = f1.item()

        # METEOR: closest to human judgement of the three metrics.
        meteor = meteor_score([ref_tokens], pred_tokens)
        return bleu, bert, meteor

    def evaluate(self, references, predictions):
        """Score every pair, write results to Excel and return the averages.

        :param references: list of reference answer strings
        :param predictions: list of generated answer strings (same length)
        :raises ValueError: if the two lists differ in length
        :return: dict with keys ``avg_bleu``, ``avg_bert``, ``avg_meteor``
                 and ``composite_score``
        """
        # An ``assert`` would be stripped under ``python -O``; validate
        # explicitly instead.
        if len(references) != len(predictions):
            raise ValueError("参考和预测数量不匹配")

        progress_bar = tqdm(total=len(references))
        try:
            for i, (ref, pred) in enumerate(zip(references, predictions)):
                try:
                    bleu, bert, meteor = self._score_pair(ref, pred)

                    self.total_bleu += bleu
                    self.total_bert += bert
                    self.total_meteor += meteor
                    self.count += 1

                    # Stream the row immediately so partial results survive.
                    self.detailed_ws.append([i + 1, ref, pred, bleu, bert, meteor])

                    # Checkpoint every 10 items to limit IO while bounding
                    # how much a crash can lose.
                    if (i + 1) % 10 == 0:
                        self.save_to_excel()
                except Exception as e:
                    # Best-effort: report the bad item and keep going.
                    print(f"处理第 {i + 1} 条数据时出错: {str(e)}")
                finally:
                    # Advance the bar even for failed items so it reaches 100%
                    # (the original only updated on success).
                    progress_bar.update(1)
        finally:
            progress_bar.close()

        # Averages; 0 when nothing was scored successfully.
        n = self.count
        avg_bleu = self.total_bleu / n if n else 0
        avg_bert = self.total_bert / n if n else 0
        avg_meteor = self.total_meteor / n if n else 0

        # Weighted composite score.
        composite_score = (self._WEIGHTS['bleu'] * avg_bleu
                           + self._WEIGHTS['bert'] * avg_bert
                           + self._WEIGHTS['meteor'] * avg_meteor)

        # Summary sheet rows.
        self.summary_ws.append(['BLEU', avg_bleu])
        self.summary_ws.append(['BERTScore', avg_bert])
        self.summary_ws.append(['METEOR', avg_meteor])
        self.summary_ws.append(['Composite Score', composite_score])

        # Final save.
        self.save_to_excel()

        return {
            'avg_bleu': avg_bleu,
            'avg_bert': avg_bert,
            'avg_meteor': avg_meteor,
            'composite_score': composite_score,
        }


# Usage example
if __name__ == "__main__":
    sheet = '聊天助手'
    # JSON file holding the generated answers to evaluate.
    file_answer = "聊天助手_answers_20250428_182553.json"

    # Load reference answers and the model's predictions.
    references = load_reference_only(sheet)
    predictions = load_answer(file_answer)

    # Build the output path ONCE so cleanup, evaluator and the final message
    # all agree on the same file.  (The original deleted the hard-coded
    # '../data/evaluation_results.xlsx' while actually writing the
    # sheet-prefixed file, so the real output was never cleaned up.)
    output_path = f"../data/{sheet}_evaluation_results.xlsx"
    if os.path.exists(output_path):
        os.remove(output_path)

    evaluator = ChatbotEvaluator(output_file=output_path)
    scores = evaluator.evaluate(references, predictions)

    # 打印结果
    print("\n评估结果:")
    print(f"BLEU平均分: {scores['avg_bleu']:.4f}")
    print(f"BERTScore平均分: {scores['avg_bert']:.4f}")
    print(f"METEOR平均分: {scores['avg_meteor']:.4f}")
    print(f"综合得分: {scores['composite_score']:.4f}")

    print(f"\n评估结果已保存到 {output_path} 文件")