import re
import numpy as np
import jieba
from rouge import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from collections import defaultdict


def preprocess(text, is_char_level=False):
    """Tokenize *text* for scoring.

    Args:
        text: Raw input string; surrounding whitespace is stripped first.
        is_char_level: If True, split into individual characters
            (e.g. "中国" -> ["中", "国"]); otherwise segment into words
            with jieba (e.g. "中国" -> ["中国"]).

    Returns:
        A list of tokens; empty list for blank input.
    """
    stripped = text.strip()
    if not stripped:
        return []
    return list(stripped) if is_char_level else jieba.lcut(stripped)


def calculate_bleu(generated, reference, n_gram=4, is_char_level=False):
    """Compute a smoothed sentence BLEU score for one summary pair.

    Args:
        generated: The hypothesis (model) summary.
        reference: The single gold reference summary.
        n_gram: Highest n-gram order to include (uniform 1..n weights).
        is_char_level: Character-level tokenization if True, else jieba words.

    Returns:
        A float BLEU score; 0.0 when either side is empty after tokenization.
    """
    if not generated or not reference:
        return 0.0

    hyp_tokens = preprocess(generated, is_char_level=is_char_level)
    # sentence_bleu expects a list of references, even for a single one.
    ref_token_lists = [preprocess(reference, is_char_level=is_char_level)]

    if not hyp_tokens or not ref_token_lists[0]:
        return 0.0

    # Uniform weights across the requested n-gram orders.
    uniform_weights = (1.0 / n_gram,) * n_gram
    # method7 smoothing avoids zero scores for missing higher-order n-grams.
    smoother = SmoothingFunction().method7

    return sentence_bleu(
        ref_token_lists,
        hyp_tokens,
        weights=uniform_weights,
        smoothing_function=smoother,
    )


def parse_file(file_path):
    """Extract (generated, reference) summary pairs from a results file.

    The file is expected to contain repeated "Generated Summary: ..." /
    "Reference Summary: ..." (or "Human"/"Gold" variants) sections.

    Args:
        file_path: Path to the UTF-8 results file.

    Returns:
        A tuple (preds, refs) of equal-length lists of stripped strings;
        both empty on read error or when no pairs are found.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as fh:
            content = fh.read()
    except FileNotFoundError:
        print(f"错误：文件 {file_path} 未找到")
        return [], []
    except UnicodeDecodeError:
        print(f"错误：文件 {file_path} 编码问题")
        return [], []

    # Lazy capture up to the next "Generated Summary" header (or EOF);
    # DOTALL lets summaries span multiple lines.
    pattern = r'Generated Summary\s*[:\-]\s*(.*?)\s*(?:Reference Summary|Human Summary|Gold Summary)\s*[:\-]\s*(.*?)(?=(?:\s*Generated Summary|$))'
    pairs = re.findall(pattern, content, re.DOTALL | re.IGNORECASE)

    if not pairs:
        print("警告：未提取到任何摘要对")
        return [], []

    preds = [gen.strip() for gen, _ in pairs]
    refs = [ref.strip() for _, ref in pairs]
    return preds, refs


def evaluate_rouge(preds, refs):
    """Compute average ROUGE-1/2/L F1 scores (as percentages) over all pairs.

    Text is de-spaced and re-segmented with jieba so the (space-delimited,
    English-oriented) `rouge` package scores Chinese word tokens.

    Args:
        preds: List of generated summaries.
        refs: List of reference summaries (paired by index with *preds*;
            extra unpaired items in the longer list are ignored).

    Returns:
        Dict with 'ROUGE-1 F1', 'ROUGE-2 F1', 'ROUGE-L F1' keys (0-100 scale);
        all zeros when the input is empty or no valid pairs remain.
    """
    zero_scores = {
        'ROUGE-1 F1': 0.0,
        'ROUGE-2 F1': 0.0,
        'ROUGE-L F1': 0.0,
    }
    if not preds or not refs:
        return zero_scores

    # Build tokenized pairs, dropping any pair with an empty side:
    # Rouge.get_scores raises ValueError on an empty hypothesis/reference.
    # (The previous except-ValueError fallback truncated to min length, which
    # never helped — both lists were already equal-length for paired input.)
    tokenized_pairs = []
    for pred, ref in zip(preds, refs):
        tok_pred = " ".join(jieba.lcut(pred.replace(" ", "")))
        tok_ref = " ".join(jieba.lcut(ref.replace(" ", "")))
        if tok_pred.strip() and tok_ref.strip():
            tokenized_pairs.append((tok_pred, tok_ref))

    if not tokenized_pairs:
        return zero_scores

    hyps, targets = zip(*tokenized_pairs)

    rouge = Rouge()
    # avg=True returns mean scores across all pairs.
    scores = rouge.get_scores(list(hyps), list(targets), avg=True)

    # Scale F1 to percentages for reporting.
    return {
        'ROUGE-1 F1': scores['rouge-1']['f'] * 100,
        'ROUGE-2 F1': scores['rouge-2']['f'] * 100,
        'ROUGE-L F1': scores['rouge-l']['f'] * 100,
    }


def evaluate_bleu(preds, refs, n_gram=4):
    """Compute mean BLEU-1..BLEU-n scores at both character and word level.

    Args:
        preds: List of generated summaries.
        refs: List of reference summaries, paired by index with *preds*.
        n_gram: Highest BLEU n-gram order to report.

    Returns:
        Dict mapping metric names ("BLEU-k 字符级"/"BLEU-k 分词级") to the
        mean score over all pairs; an all-zero defaultdict for empty input.
    """
    if not preds or not refs:
        return defaultdict(float)

    per_metric = defaultdict(list)

    # Score every pair at each n-gram order, in both tokenization modes.
    for hyp, ref in zip(preds, refs):
        for order in range(1, n_gram + 1):
            per_metric[f"BLEU-{order} 字符级"].append(
                calculate_bleu(hyp, ref, n_gram=order, is_char_level=True)
            )
            per_metric[f"BLEU-{order} 分词级"].append(
                calculate_bleu(hyp, ref, n_gram=order, is_char_level=False)
            )

    # Average each metric's per-pair scores.
    return {
        name: (np.mean(values) if values else 0.0)
        for name, values in per_metric.items()
    }


def main(file_path, n_gram=4):
    """Entry point: parse summary pairs from *file_path*, then print
    ROUGE and BLEU scores.

    Args:
        file_path: Path to the results file with generated/reference pairs.
        n_gram: Highest BLEU n-gram order to evaluate.
    """
    summaries, references = parse_file(file_path)

    if not summaries or not references:
        print("未检测到有效的摘要对，请检查文件格式")
        return

    print(f"成功提取 {len(summaries)} 对摘要，开始计算指标得分...")

    rouge_results = evaluate_rouge(summaries, references)
    bleu_results = evaluate_bleu(summaries, references, n_gram=n_gram)

    print("\n================ ROUGE 得分 ==================")
    for name, value in rouge_results.items():
        print(f"{name}: {value:.4f}")

    print("\n================ BLEU 得分 ==================")
    for name, value in bleu_results.items():
        print(f"{name}: {value:.4f}")


if __name__ == "__main__":
    file_path = r"C:\Users\22581\Desktop\训练结果\lcsts\Test\bart_lcsts_1dense.txt"
    n_gram = 4

    main(file_path, n_gram)