"""实体识别评估（F1/精确率/召回率）"""
"""实体识别评估（F1/精确率/召回率）"""
import json
from typing import Dict, List, Any, Tuple
from seqeval.metrics import f1_score, precision_score, recall_score
from fin_senti_entity_platform.utils.logger import get_logger
from fin_senti_entity_platform.model_development.entity_recognition.bert_crf_trainer import BertCRFTrainer

logger = get_logger(__name__)

class EntityEvaluator:
    """Entity-recognition evaluation utility.

    Wraps a trained BERT-CRF model and computes span-level precision,
    recall and F1, both overall (micro-averaged) and per entity type.
    An entity counts as correct only when its text, start, end and type
    all match exactly.
    """

    def __init__(self):
        """Initialize the evaluator with a BERT-CRF trainer used for model loading/prediction."""
        self.trainer = BertCRFTrainer()

    def load_model(self) -> bool:
        """Load the trained entity-recognition model.

        Returns:
            True on success, False if loading raised (the error is logged,
            not propagated).
        """
        try:
            self.trainer.load_model()
            logger.info("成功加载实体识别模型")
            return True
        except Exception as e:
            logger.error(f"加载模型失败: {e}")
            return False

    def evaluate_file(self, test_file_path: str) -> Dict[str, Any]:
        """Evaluate the model on a JSON test file.

        Args:
            test_file_path: Path to a JSON file containing a list of samples,
                each with a 'text' field and an 'entities' list.

        Returns:
            Evaluation result dict (see ``evaluate_data``).

        Raises:
            Exception: Any error while reading/parsing the file or evaluating
                is logged and re-raised.
        """
        try:
            with open(test_file_path, 'r', encoding='utf-8') as f:
                test_data = json.load(f)

            logger.info(f"开始评估实体识别模型，测试集大小: {len(test_data)}")

            return self.evaluate_data(test_data)

        except Exception as e:
            logger.error(f"评估文件失败: {e}")
            raise

    def evaluate_data(self, test_data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Run prediction on each sample and compute evaluation metrics.

        Args:
            test_data: List of samples; each sample is a dict with 'text'
                and gold 'entities' (dicts with 'type', 'start', 'end').

        Returns:
            Dict with keys 'overall' (micro-averaged precision/recall/f1),
            'per_entity_type' (per-type metrics) and 'sample_count'.
        """
        true_entities_list: List[List[Tuple[str, int, int, str]]] = []
        pred_entities_list: List[List[Tuple[str, int, int, str]]] = []

        for i, sample in enumerate(test_data):
            text = sample.get('text', '')
            true_entities = sample.get('entities', [])

            # Model prediction for this sample's raw text.
            pred_entities = self.trainer.predict(text)

            # Normalize both gold and predicted entities to comparable tuples.
            true_entities_list.append(self._format_for_evaluation(text, true_entities))
            pred_entities_list.append(self._format_for_evaluation(text, pred_entities))

            # Periodic progress logging for large test sets.
            if (i + 1) % 100 == 0:
                logger.info(f"已处理 {i + 1}/{len(test_data)} 条样本")

        overall_metrics = self._calculate_metrics(true_entities_list, pred_entities_list)
        type_metrics = self._calculate_type_metrics(true_entities_list, pred_entities_list)

        return {
            'overall': overall_metrics,
            'per_entity_type': type_metrics,
            'sample_count': len(test_data)
        }

    def _format_for_evaluation(self, text: str, entities: List[Dict[str, Any]]) -> List[Tuple[str, int, int, str]]:
        """Convert entity dicts to ``(entity_text, start, end, type)`` tuples.

        Entities with out-of-range or inverted offsets are silently dropped,
        so downstream metrics only ever see valid spans.
        """
        formatted = []
        for entity in entities:
            entity_type = entity.get('type', '')
            start = entity.get('start', 0)
            end = entity.get('end', len(text))

            # Keep only spans that fall entirely inside the text.
            if start >= 0 and end > start and end <= len(text):
                entity_text = text[start:end]
                formatted.append((entity_text, start, end, entity_type))

        return formatted

    def _calculate_metrics(self, true_entities_list: List[List[Tuple]], pred_entities_list: List[List[Tuple]]) -> Dict[str, float]:
        """Compute overall micro-averaged span-level precision/recall/F1.

        Counts exact-match entity tuples (text, start, end, type) per sample
        and aggregates TP/FP/FN over the whole test set. This replaces a
        previous placeholder that only compared the first entity of each
        sample via fabricated one-token seqeval label sequences, which
        produced meaningless overall scores for multi-entity samples.
        """
        tp = fp = fn = 0

        for true_entities, pred_entities in zip(true_entities_list, pred_entities_list):
            true_set = set(true_entities)
            pred_set = set(pred_entities)

            tp += len(true_set & pred_set)   # exact matches
            fp += len(pred_set - true_set)   # predicted but not gold
            fn += len(true_set - pred_set)   # gold but missed

        precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0

        return {
            'precision': precision,
            'recall': recall,
            'f1': f1
        }

    def _calculate_type_metrics(self, true_entities_list: List[List[Tuple]], pred_entities_list: List[List[Tuple]]) -> Dict[str, Dict[str, float]]:
        """Compute precision/recall/F1/support per entity type.

        TP/FP/FN are accumulated per type across all samples using exact
        (text, start, end) matching within each type.
        """
        # type -> {'tp': ..., 'fp': ..., 'fn': ...}
        type_stats: Dict[str, Dict[str, int]] = {}

        for true_entities, pred_entities in zip(true_entities_list, pred_entities_list):
            # Group entity spans by type; (text, start, end) identifies a span.
            true_by_type: Dict[str, set] = {}
            pred_by_type: Dict[str, set] = {}

            for ent in true_entities:
                true_by_type.setdefault(ent[3], set()).add((ent[0], ent[1], ent[2]))

            for ent in pred_entities:
                pred_by_type.setdefault(ent[3], set()).add((ent[0], ent[1], ent[2]))

            for ent_type in set(true_by_type) | set(pred_by_type):
                stats = type_stats.setdefault(ent_type, {'tp': 0, 'fp': 0, 'fn': 0})

                true_set = true_by_type.get(ent_type, set())
                pred_set = pred_by_type.get(ent_type, set())

                stats['tp'] += len(true_set & pred_set)
                stats['fp'] += len(pred_set - true_set)
                stats['fn'] += len(true_set - pred_set)

        type_metrics: Dict[str, Dict[str, float]] = {}

        for ent_type, stats in type_stats.items():
            tp, fp, fn = stats['tp'], stats['fp'], stats['fn']

            precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
            recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
            f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0

            type_metrics[ent_type] = {
                'precision': precision,
                'recall': recall,
                'f1': f1,
                'support': tp + fn  # number of gold entities of this type
            }

        return type_metrics

    def print_evaluation_report(self, metrics: Dict[str, Any]):
        """Log a human-readable evaluation report.

        Args:
            metrics: Result dict produced by ``evaluate_data``.
        """
        logger.info("===== 实体识别评估报告 =====")

        logger.info("\n整体指标:")
        logger.info(f"  精确率: {metrics['overall']['precision']:.4f}")
        logger.info(f"  召回率: {metrics['overall']['recall']:.4f}")
        logger.info(f"  F1分数: {metrics['overall']['f1']:.4f}")
        logger.info(f"  样本数量: {metrics['sample_count']}")

        logger.info("\n实体类型级别指标:")
        for ent_type, type_metric in metrics['per_entity_type'].items():
            logger.info(f"  {ent_type}:")
            logger.info(f"    精确率: {type_metric['precision']:.4f}")
            logger.info(f"    召回率: {type_metric['recall']:.4f}")
            logger.info(f"    F1分数: {type_metric['f1']:.4f}")
            logger.info(f"    支持样本数: {type_metric['support']}")

        logger.info("========================")

    def save_evaluation_report(self, metrics: Dict[str, Any], output_path: str):
        """Save the evaluation report as pretty-printed JSON.

        Args:
            metrics: Result dict produced by ``evaluate_data``.
            output_path: Destination file path.

        Raises:
            Exception: Any write/serialization error is logged and re-raised.
        """
        try:
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(metrics, f, ensure_ascii=False, indent=2)

            logger.info(f"评估报告已保存到 {output_path}")
        except Exception as e:
            logger.error(f"保存评估报告失败: {e}")
            raise

# Example usage
if __name__ == "__main__":
    evaluator = EntityEvaluator()

    # Bail out early if the model failed to load: load_model() returns a
    # success flag (it does not raise), so ignoring it would run the whole
    # evaluation against an unloaded model.
    if not evaluator.load_model():
        raise SystemExit(1)

    # NOTE(review): placeholder paths — replace with real locations.
    test_file_path = "/path/to/test_data.json"
    try:
        metrics = evaluator.evaluate_file(test_file_path)

        # Log the report, then persist it as JSON.
        evaluator.print_evaluation_report(metrics)
        evaluator.save_evaluation_report(metrics, "/path/to/evaluation_report.json")

    except Exception as e:
        logger.error(f"评估过程中发生错误: {e}")