# -*- coding: utf-8 -*-
r"""
Scoring script:
- Reads each xlsx file under d:\PyCharm\PyCharmFile\autoQA\测试结果 in turn
- For every row, compares "原始答案" (reference) against "大模型答案" (prediction)
  on four metrics: F1, BLEU-4, BERTScore-F1, ROUGE-L
- Writes a copy of each workbook into d:\PyCharm\PyCharmFile\autoQA\final_score
  (same file name, replacing any existing file) with the four score columns appended
"""

import os
import shutil
from pathlib import Path
from typing import List, Tuple
import math
import re
import pandas as pd

# Optional: torch + transformers are used for a real BERTScore. When either
# import fails, BERT_AVAILABLE is False and BertScorer.score_f1 falls back to
# a character-distribution cosine similarity instead.
try:
    import torch
    from transformers import AutoTokenizer, AutoModel
    BERT_AVAILABLE = True
except Exception as e:
    print(f"警告：BERTScore所需库导入失败，将跳过BERTScore计算：{e}")
    BERT_AVAILABLE = False


def _strip_punct_spaces(text: str) -> str:
    """去除空白与常见标点，适合中文字符级比较。"""
    if text is None:
        return ""
    text = str(text)
    # 去掉方括号类标注
    text = re.sub(r"\[.*?\]", "", text)
    text = re.sub(r"【.*?】", "", text)
    # 去除常见中文/英文标点与空白
    punct = "，。；：！、（）《》“”‘’—…-_,.;:!?()[]{}<>\n\r\t\f\v\u3000"
    return "".join(ch for ch in text if ch not in punct and not ch.isspace())


def _char_tokens(text: str) -> List[str]:
    """Tokenize text into a list of single characters (Chinese-friendly)."""
    return list(_strip_punct_spaces(text))


def _count_overlap(tokens_a: List[str], tokens_b: List[str]) -> int:
    from collections import Counter
    ca, cb = Counter(tokens_a), Counter(tokens_b)
    return sum(min(ca[t], cb[t]) for t in ca.keys())


def f1_score(pred: str, ref: str) -> float:
    """Character-level F1: 2*P*R/(P+R) over multiset token overlap."""
    pred_toks = _char_tokens(pred)
    ref_toks = _char_tokens(ref)
    # Edge cases: both empty counts as a perfect match, one empty as zero.
    if not pred_toks and not ref_toks:
        return 1.0
    if not pred_toks or not ref_toks:
        return 0.0
    common = _count_overlap(pred_toks, ref_toks)
    precision = common / max(len(pred_toks), 1)
    recall = common / max(len(ref_toks), 1)
    denom = precision + recall
    return 2 * precision * recall / denom if denom > 0 else 0.0


def _ngrams(tokens: List[str], n: int) -> List[Tuple[str, ...]]:
    return [tuple(tokens[i:i+n]) for i in range(max(len(tokens) - n + 1, 0))]


def bleu4_score(pred: str, ref: str) -> float:
    """Simplified char-level BLEU-4 with add-one smoothing and a brevity penalty."""
    from collections import Counter

    pred_toks = _char_tokens(pred)
    ref_toks = _char_tokens(ref)
    if not pred_toks and not ref_toks:
        return 1.0
    if not pred_toks or not ref_toks:
        return 0.0

    # Accumulate log-precisions for n = 1..4, then take the geometric mean.
    log_sum = 0.0
    for order in range(1, 4 + 1):
        pred_grams = _ngrams(pred_toks, order)
        if not pred_grams:
            # Prediction too short for this order: precision is treated as
            # zero and clamped to 1e-9 before the log.
            log_sum += math.log(1e-9)
            continue
        pred_counts = Counter(pred_grams)
        ref_counts = Counter(_ngrams(ref_toks, order))
        clipped = sum(min(cnt, ref_counts[gram]) for gram, cnt in pred_counts.items())
        # Add-one smoothing keeps every precision strictly positive.
        precision = (clipped + 1) / (sum(pred_counts.values()) + 1)
        log_sum += math.log(max(precision, 1e-9))
    geo_mean = math.exp(log_sum / 4.0)

    # Brevity penalty: penalize predictions shorter than the reference.
    ref_len, pred_len = len(ref_toks), len(pred_toks)
    penalty = 1.0 if pred_len > ref_len else math.exp(1 - ref_len / max(pred_len, 1))
    return penalty * geo_mean


def rouge_l_score(pred: str, ref: str) -> float:
    """ROUGE-L: F1 over the longest common subsequence, at character level."""
    pred_toks = _char_tokens(pred)
    ref_toks = _char_tokens(ref)
    if not pred_toks and not ref_toks:
        return 1.0
    if not pred_toks or not ref_toks:
        return 0.0
    # LCS length via DP with two rolling rows (O(len(ref)) memory).
    width = len(ref_toks) + 1
    prev = [0] * width
    for ch_a in pred_toks:
        curr = [0] * width
        for j, ch_b in enumerate(ref_toks, start=1):
            if ch_a == ch_b:
                curr[j] = prev[j - 1] + 1
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    lcs_len = prev[-1]
    precision = lcs_len / len(pred_toks)
    recall = lcs_len / len(ref_toks)
    denom = precision + recall
    return 2 * precision * recall / denom if denom > 0 else 0.0


class BertScorer:
    """Simplified BERTScore-F1: cosine similarity between contextual token
    embeddings, averaging each token's best match on either side."""
    def __init__(self, model_name: str = 'bert-base-chinese'):
        # Whether a real BERT model was loaded; False routes score_f1 to the fallback.
        self.enabled = False
        if not BERT_AVAILABLE:
            # torch/transformers unavailable: placeholder attributes only.
            self.tokenizer = None
            self.model = None
            self.device = 'cpu'
            return
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # Prefer offline loading so an unreachable network cannot raise here.
        os.environ.setdefault('HF_ENDPOINT', 'https://hf-mirror.com')
        os.environ.setdefault('HF_HUB_OFFLINE', '1')
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_name, local_files_only=True)
            self.model = AutoModel.from_pretrained(model_name, local_files_only=True)
            self.model.to(self.device)
            self.model.eval()
            self.enabled = True
        except Exception as e:
            # Local model files absent or unloadable: degrade to the fallback scorer.
            print(f"BERT模型加载失败或离线不可用，将使用降级BERTScore：{e}")
            self.tokenizer = None
            self.model = None
            self.device = 'cpu'

    def score_f1(self, pred: str, ref: str) -> float:
        """Return a BERTScore-style F1 for *pred* against *ref*.

        Falls back to a bag-of-characters cosine similarity when no BERT
        model is available (placeholder, not a true BERTScore).
        """
        if not BERT_AVAILABLE or not getattr(self, 'enabled', False):
            # Fallback when BERT is unavailable: cosine over character-count
            # vectors (not a real BERTScore; avoids an all-zero column).
            def char_cosine(a: str, b: str) -> float:
                ta = _char_tokens(a)
                tb = _char_tokens(b)
                if not ta and not tb:
                    return 1.0
                if not ta or not tb:
                    return 0.0
                from collections import Counter
                ca, cb = Counter(ta), Counter(tb)
                keys = set(ca.keys()) | set(cb.keys())
                va = [ca.get(k, 0) for k in keys]
                vb = [cb.get(k, 0) for k in keys]
                import math
                dot = sum(x*y for x, y in zip(va, vb))
                na = math.sqrt(sum(x*x for x in va))
                nb = math.sqrt(sum(y*y for y in vb))
                return (dot / (na*nb)) if (na > 0 and nb > 0) else 0.0
            return char_cosine(pred or '', ref or '')
        pred = str(pred or '')
        ref = str(ref or '')
        # Empty-text edge cases mirror the other metrics: both empty -> 1, one -> 0.
        if len(pred.strip()) == 0 and len(ref.strip()) == 0:
            return 1.0
        if len(pred.strip()) == 0 or len(ref.strip()) == 0:
            return 0.0

        with torch.no_grad():
            ip = self.tokenizer(pred, return_tensors='pt', truncation=True)
            ir = self.tokenizer(ref, return_tensors='pt', truncation=True)
            ip = {k: v.to(self.device) for k, v in ip.items()}
            ir = {k: v.to(self.device) for k, v in ir.items()}
            hp = self.model(**ip).last_hidden_state.squeeze(0)  # [Lp, H]
            hr = self.model(**ir).last_hidden_state.squeeze(0)  # [Lr, H]

            # Mask out special tokens (CLS/SEP etc.) before matching.
            sp_ids = set(self.tokenizer.all_special_ids)
            ip_ids = ip['input_ids'].squeeze(0)
            ir_ids = ir['input_ids'].squeeze(0)
            mp = torch.tensor([tid.item() not in sp_ids for tid in ip_ids], device=self.device)
            mr = torch.tensor([tid.item() not in sp_ids for tid in ir_ids], device=self.device)
            hp = hp[mp]
            hr = hr[mr]

            if hp.numel() == 0 or hr.numel() == 0:
                return 0.0

            # L2-normalize rows so the matrix product below is cosine similarity.
            hp = torch.nn.functional.normalize(hp, p=2, dim=1)
            hr = torch.nn.functional.normalize(hr, p=2, dim=1)
            sim = torch.mm(hp, hr.t())  # [Lp, Lr]
            sim = torch.clamp(sim, min=0.0)  # keep non-negative similarities only
            # Precision: mean of each pred token's best match in ref.
            P = sim.max(dim=1).values.mean().item()
            # Recall: mean of each ref token's best match in pred.
            R = sim.max(dim=0).values.mean().item()
            if (P + R) == 0:
                return 0.0
            return 2 * P * R / (P + R)


def process_file(excel_path: Path, scorer: BertScorer) -> None:
    """Score one result workbook and write a copy with four metric columns.

    Reads *excel_path*, computes F1 / BLEU-4 / BERTScore / ROUGE-L for every
    row (reference column '原始答案' vs prediction column '大模型答案'),
    appends the four score columns plus a final average row, and saves the
    result into the sibling 'final_score' directory under the same file name.

    Errors while reading or writing are reported on stdout; the function
    returns without raising.
    """
    print(f"处理结果文件：{excel_path.name}")
    try:
        df = pd.read_excel(excel_path)
    except Exception as e:
        print(f"读取Excel失败：{e}")
        return

    # Both answer columns are required; skip the file otherwise.
    if '原始答案' not in df.columns or '大模型答案' not in df.columns:
        print("表格缺少必要列：原始答案 或 大模型答案")
        return

    f1_list, bleu_list, bert_list, rouge_list = [], [], [], []
    for _, row in df.iterrows():
        ref = str(row.get('原始答案', '') or '')
        pred = str(row.get('大模型答案', '') or '')

        # Compute each metric, rounded to two decimals.
        f1_list.append(round(f1_score(pred, ref), 2))
        bleu_list.append(round(bleu4_score(pred, ref), 2))
        rouge_list.append(round(rouge_l_score(pred, ref), 2))
        # Always call the scorer: it degrades gracefully when BERT is absent.
        bert_list.append(round(scorer.score_f1(pred, ref), 2))

    # Append the four metric columns.
    df['F1分数'] = f1_list
    df['BLEU分数'] = bleu_list
    df['BERTScore'] = bert_list
    df['ROUGE分数'] = rouge_list

    # Append a final row holding the mean of every metric.
    def _mean2(vals: list) -> float:
        """Mean rounded to 2 decimals; 0.0 for an empty list."""
        return round((sum(vals) / len(vals)) if vals else 0.0, 2)

    avg_row = {col: '' for col in df.columns}
    if '序号' in avg_row:
        avg_row['序号'] = None
    if '问题内容' in avg_row:
        avg_row['问题内容'] = '平均分'
    avg_row['F1分数'] = _mean2(f1_list)
    avg_row['BLEU分数'] = _mean2(bleu_list)
    avg_row['BERTScore'] = _mean2(bert_list)
    avg_row['ROUGE分数'] = _mean2(rouge_list)
    try:
        df = pd.concat([df, pd.DataFrame([avg_row])], ignore_index=True)
    except Exception:
        # Worst case: append via positional loc.
        df.loc[len(df)] = avg_row

    # final_score sits next to the input's parent directory; files go in
    # directly, no per-file subfolder.
    out_root = excel_path.parent.parent / 'final_score'
    out_root.mkdir(exist_ok=True)
    out_file = out_root / excel_path.name
    # Best-effort removal of any stale file with the same name.
    if out_file.exists():
        try:
            out_file.unlink()
        except Exception:
            pass
    try:
        with pd.ExcelWriter(out_file, engine='openpyxl') as writer:
            df.to_excel(writer, index=False, sheet_name='测试结果')
            # Force two-decimal display on the metric columns.
            ws = writer.sheets.get('测试结果')
            if ws is None:
                ws = writer.book['测试结果']
            metric_cols = ['F1分数', 'BLEU分数', 'BERTScore', 'ROUGE分数']
            for col_name in metric_cols:
                if col_name in df.columns:
                    col_idx = df.columns.get_loc(col_name) + 1  # openpyxl columns are 1-based
                    # Excel row 1 is the header, so the len(df) data rows
                    # occupy rows 2..len(df)+1. Bug fix: the old bound of
                    # len(df)+1 skipped the last (average) row.
                    for r in range(2, len(df) + 2):
                        cell = ws.cell(row=r, column=col_idx)
                        cell.number_format = '0.00'
        print(f"评分结果已保存：{out_file}")
    except Exception as e:
        print(f"保存评分结果失败：{e}")


def main() -> None:
    """Entry point: score every xlsx workbook found under 测试结果."""
    print("=== 分数计算脚本 ===")
    source_dir = Path('测试结果')
    if not source_dir.exists():
        print("测试结果 目录不存在")
        return

    # A single scorer instance is shared across all files (model loads once).
    bert_scorer = BertScorer(model_name='bert-base-chinese')

    workbooks = [path for path in source_dir.glob('*.xlsx')]
    if not workbooks:
        print("测试结果 目录中未找到xlsx文件")
        return

    for workbook in workbooks:
        process_file(workbook, bert_scorer)

    print("全部文件评分完成！")


# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()