### utils.py
"""Evaluation utilities for cover Exact Match and accuracy."""
import json
import re
import string
import unicodedata

def normalize_answer(text: str) -> str:
    """Normalize an answer string for robust comparison.

    Steps: Unicode-decompose and strip diacritics, lowercase, remove ASCII
    punctuation, drop English articles (a/an/the), and collapse whitespace.

    Args:
        text: Raw answer string.

    Returns:
        The normalized string.
    """
    # NFD splits each accented character into base char + combining mark;
    # dropping the marks (category 'Mn') makes comparison accent-insensitive.
    # Previously the marks were kept, so the NFD call had no useful effect
    # ("Café" stayed distinct from "cafe").
    text = unicodedata.normalize("NFD", text)
    text = ''.join(ch for ch in text if unicodedata.category(ch) != 'Mn')
    # lower
    text = text.lower()
    # remove punctuation
    text = ''.join(ch for ch in text if ch not in set(string.punctuation))
    # remove articles (word-boundary match, so e.g. "theory" is untouched)
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    # fix whitespace
    return ' '.join(text.split())


def cover_em_score(prediction: str, ground_truth: str) -> bool:
    """Cover Exact Match: True when the normalized ground truth occurs as a
    substring of the normalized prediction."""
    needle = normalize_answer(ground_truth)
    haystack = normalize_answer(prediction)
    return haystack.find(needle) != -1

def em_score(prediction: str, ground_truth: str) -> bool:
    """Strict Exact Match: True when prediction and ground truth are identical
    after normalization.

    The prediction is coerced to ``str`` first, so non-string predictions
    (e.g. numbers) are handled without raising.
    """
    return normalize_answer(str(prediction)) == normalize_answer(ground_truth)



def evaluate_predictions_impl(references: dict,
                              predictions: dict) -> dict:
    """Compute exact-match accuracy of predictions against reference answers.

    Args:
        references: Mapping of question -> list of acceptable answers.
        predictions: Mapping of question -> predicted answer (str) or a
            list of candidate answers.

    Returns:
        Dict with keys 'missing_predictions', 'num_correct', 'num_total'
        (the number of reference questions), and 'accuracy'.

    NOTE(review): this uses strict ``em_score`` although the module also
    defines ``cover_em_score`` (unused) and the old docstring said
    "cover EM" — confirm which metric is intended.
    """
    missing_predictions = 0
    correct = 0

    for question, answers in references.items():
        if question not in predictions:
            missing_predictions += 1
            continue
        candidates = predictions[question]
        if isinstance(candidates, str):
            candidates = [candidates]
        # A question is correct if any candidate matches any acceptable
        # ground-truth answer.  (The original generator shadowed its own
        # iterable: `for pred in pred` — renamed for clarity.)
        is_match = any(em_score(cand, gt)
                       for cand in candidates
                       for gt in answers)
        correct += int(is_match)

    # Denominator is the number of reference questions: missing predictions
    # count as wrong, and stray predictions for questions that are not in
    # the references are ignored.  (Previously total = len(predictions),
    # which miscounted both cases.)
    total = len(references)
    accuracy = correct / total if total > 0 else 0.0

    return {
        'missing_predictions': missing_predictions,
        'num_correct': correct,
        'num_total': total,
        'accuracy': accuracy,
    }


def evaluate_predictions(
    references_path: str,
    predictions_path: str,
    answer_field: str = "answer"
) -> dict:
    """Load references and predictions from JSONL files and evaluate EM/accuracy.

    (The old docstring mentioned an ``is_regex`` flag that does not exist in
    this signature; that stale reference has been removed.)

    Args:
        references_path: Path to a JSONL file; each line is an object with a
            'question' key and an ``answer_field`` key (str or list of str).
        predictions_path: Path to a JSONL file; each line is an object with
            'question' and 'prediction' keys.
        answer_field: Name of the reference-answer field (default "answer").

    Returns:
        The metrics dict produced by ``evaluate_predictions_impl``.
    """
    # Load references
    references = {}
    with open(references_path, 'r', encoding='utf-8') as ref_file:
        for line in ref_file:
            # Tolerate blank lines (e.g. a trailing empty line in the file).
            if not line.strip():
                continue
            example = json.loads(line)
            q = example.get('question')
            answers = example.get(answer_field)
            # Normalize a single answer string to a one-element list.
            if isinstance(answers, str):
                answers = [answers]
            references[q] = answers

    # Load predictions
    predictions = {}
    with open(predictions_path, 'r', encoding='utf-8') as pred_file:
        for line in pred_file:
            if not line.strip():
                continue
            example = json.loads(line)
            predictions[example.get('question')] = example.get('prediction')

    # Evaluate
    return evaluate_predictions_impl(
        references=references,
        predictions=predictions
    )
