# -*- coding: utf-8 -*-
import json

import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
    Trainer,
    EvalPrediction,
)

class MetricsComputer:
    """Compute entity-level precision/recall/F1 for a generative NER task.

    The model is expected to generate, after a ``### Response:`` marker, a JSON
    array of objects of the form ``{"entity": ..., "label": ...}``.  Predictions
    and labels are decoded back to text, the entity sets are extracted from each
    side, and micro-averaged metrics are computed over (entity, label) pairs.

    Intended for use as the ``compute_metrics`` callable of a ``transformers.Trainer``.
    """

    def __init__(self, tokenizer):
        # Tokenizer used to decode predicted / label token ids back to text.
        self.tokenizer = tokenizer

    def _parse_entities(self, text):
        """Parse entities from generated text.

        Finds the outermost JSON array (first ``[`` to last ``]``) in *text* and
        returns a set of ``(entity, label)`` tuples for comparison.  Malformed
        elements are skipped individually; any unparseable payload yields an
        empty set rather than raising.
        """
        start = text.find('[')
        end = text.rfind(']') + 1
        if start == -1 or end == 0:
            return set()
        try:
            entities = json.loads(text[start:end])
            # isinstance guard: one malformed element must not discard the
            # well-formed entities in the same array.
            return {
                (d['entity'], d['label'])
                for d in entities
                if isinstance(d, dict) and 'entity' in d and 'label' in d
            }
        except (json.JSONDecodeError, TypeError):
            return set()

    def __call__(self, eval_preds: "EvalPrediction"):
        """Decode *eval_preds* and return entity-level metrics.

        Returns:
            dict with keys ``"precision"``, ``"recall"`` and ``"f1"``
            (micro-averaged over all samples; 0.0 on empty denominators).
        """
        preds, labels = eval_preds

        # Some models return a tuple (e.g. logits plus extra outputs); keep the
        # first element only.
        if isinstance(preds, tuple):
            preds = preds[0]

        # Replace the ignore index (-100) with pad_token_id so decoding works.
        # np.where avoids mutating the caller's label array in place.
        labels = np.where(labels == -100, self.tokenizer.pad_token_id, labels)

        decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True)
        decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)

        # Marker separating the prompt from the generated answer.
        response_marker = "### Response:"

        # Accumulate entity-level counts directly; building per-entity 0/1
        # lists for sklearn would only re-derive these same sums.
        tp = fp = fn = 0
        for pred, label in zip(decoded_preds, decoded_labels):
            true_entities = (
                self._parse_entities(label.split(response_marker, 1)[1])
                if response_marker in label
                else set()
            )
            pred_entities = (
                self._parse_entities(pred.split(response_marker, 1)[1])
                if response_marker in pred
                else set()
            )
            tp += len(true_entities & pred_entities)
            fp += len(pred_entities - true_entities)
            fn += len(true_entities - pred_entities)

        # Micro-averaged binary metrics; 0.0 on zero denominators matches
        # sklearn's zero_division default used by the previous implementation.
        precision = tp / (tp + fp) if (tp + fp) else 0.0
        recall = tp / (tp + fn) if (tp + fn) else 0.0
        f1 = (
            2 * precision * recall / (precision + recall)
            if (precision + recall)
            else 0.0
        )

        return {
            "precision": precision,
            "recall": recall,
            "f1": f1,
        }