#!/usr/bin/env python3
"""
通用关键字段信息抽取 - 评估脚本

评估模型在测试集上的性能，包括：
1. 字段级别的准确率
2. 整体提取准确率
3. 性能指标统计
"""

import argparse
import json
import logging
import re
import sys
from datetime import datetime
from difflib import SequenceMatcher
from pathlib import Path
from typing import Dict, List, Any, Tuple

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import yaml
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Module-level logging: INFO level with the default handler/format.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Try to import the optional template manager; when it is missing, the
# evaluator falls back to reading template YAML files / built-in fields.
try:
    from template_manager import get_template_manager
    TEMPLATE_MANAGER_AVAILABLE = True
except ImportError:
    logger.warning("模板管理器不可用，使用内置字段定义")
    TEMPLATE_MANAGER_AVAILABLE = False


class UniversalDocumentEvaluator:
    """Universal document key-field extraction evaluator.

    Evaluates model extraction quality for multiple document types:
    contracts, invoices, resumes, medical reports, ID cards and other
    structured documents. Produces JSON / text / Excel reports and
    optional matplotlib charts under ``outputs/evaluation``.
    """

    def __init__(self, config_path: str = "config/config.yaml", document_type: str = "contract"):
        """Initialize the evaluator.

        Args:
            config_path: Path to the YAML configuration file.
            document_type: Document type whose field template drives evaluation.
        """
        self.config = self._load_config(config_path)
        self.document_type = document_type
        self.document_fields = self._load_document_fields(document_type)

        # All reports and plots are written below this directory.
        self.output_dir = Path("outputs/evaluation")
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Fallback document type when a template cannot be resolved.
        self.default_document_type = "contract"

    def _load_config(self, config_path: str) -> dict:
        """Load and return the YAML configuration file."""
        with open(config_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)

    def _parse_template_fields(self, template_path: Path) -> List[str]:
        """Parse field names from a template YAML file.

        Supports both field formats: [{name: "..."}] and [{field_name: "..."}].
        Returns an empty list when the file has no usable 'fields' section.
        May raise OSError / yaml.YAMLError; callers decide the fallback.
        """
        with open(template_path, 'r', encoding='utf-8') as f:
            template = yaml.safe_load(f)
        # safe_load returns None for an empty file -- guard before key access
        # so a blank template doesn't raise TypeError.
        if not isinstance(template, dict) or not isinstance(template.get('fields'), list):
            return []
        fields = []
        for field in template['fields']:
            if isinstance(field, dict):
                field_name = field.get('name') or field.get('field_name')
                if field_name:
                    fields.append(field_name)
        return fields

    def _load_document_fields(self, document_type: str) -> List[str]:
        """Resolve the field list for *document_type*.

        Resolution order: template manager -> template YAML file ->
        default contract fields.
        """
        if TEMPLATE_MANAGER_AVAILABLE:
            try:
                manager = get_template_manager()
                template = manager.get_template(document_type)
                if template:
                    fields = template.get_field_names()
                    if fields:
                        logger.info(f"✅ 从模板管理器加载字段: {len(fields)} 个")
                        return fields
            except Exception as e:
                logger.warning(f"⚠️  模板管理器加载字段失败: {e}")

        # Fall back to reading the template YAML directly.
        template_path = Path(f"config/templates/{document_type}.yaml")
        if not template_path.exists():
            logger.warning(f"⚠️  未找到模板文件: {template_path}，使用默认合同模板")
            return self._get_default_fields()

        try:
            fields = self._parse_template_fields(template_path)
            if fields:
                logger.info(f"✅ 从模板配置加载字段: {len(fields)} 个")
                return fields
        except Exception as e:
            logger.error(f"❌ 加载模板失败: {e}，使用默认合同模板")

        # Last-resort fallback.
        return self._get_default_fields()

    def _get_default_fields(self) -> List[str]:
        """Return the default contract field list."""
        # Prefer the default contract template file when present.
        default_template_path = Path("config/templates/contract.yaml")
        if default_template_path.exists():
            try:
                fields = self._parse_template_fields(default_template_path)
                if fields:
                    logger.info(f"✅ 从默认模板加载字段: {len(fields)} 个")
                    return fields
            except Exception as e:
                logger.error(f"❌ 加载默认模板失败: {e}")

        # Hard-coded minimal field list as the final fallback.
        logger.warning("⚠️  使用硬编码的默认字段")
        return [
            "合同名称", "合同编号", "甲方名称", "甲方地址", "甲方联系人",
            "乙方名称", "乙方地址", "乙方联系人", "合同金额", "履行期限",
            "签署日期", "生效日期", "履行地点", "付款方式", "违约责任"
        ]

    def _normalize_text(self, text: str) -> str:
        """Normalize a value for comparison.

        Strips everything except word characters and CJK ideographs and
        lower-cases the result. The sentinel "未找到" (not found) and empty
        values normalize to "".
        """
        if not text or text == "未找到":
            return ""
        normalized = re.sub(r'[^\w\u4e00-\u9fff]', '', str(text))
        return normalized.lower()

    def _calculate_text_similarity(self, pred_text: str, true_text: str) -> float:
        """Return a similarity score in [0, 1] between two field values."""
        pred_norm = self._normalize_text(pred_text)
        true_norm = self._normalize_text(true_text)

        if not true_norm and not pred_norm:
            return 1.0  # Both empty: treated as a perfect match.

        if not true_norm or not pred_norm:
            return 0.0  # Exactly one empty: no match.

        if pred_norm == true_norm:
            return 1.0  # Exact match after normalization.

        # Edit-distance similarity, piecewise-rescaled so near-misses keep a
        # high score while dissimilar strings are penalized harder.
        similarity = SequenceMatcher(None, pred_norm, true_norm).ratio()
        if similarity >= 0.9:
            return 0.9 + (similarity - 0.9) * 0.1  # Mapped into [0.9, 0.91]
        elif similarity >= 0.7:
            return 0.7 + (similarity - 0.7) * 0.2  # Mapped into [0.7, 0.74)
        else:
            return similarity * 0.7  # Mapped into [0.0, 0.49)

    def _calculate_field_accuracy(self, pred_value: str, true_value: str) -> float:
        """Accuracy of a single field (alias for text similarity)."""
        return self._calculate_text_similarity(pred_value, true_value)

    def _calculate_sample_metrics(self, predictions: Dict[str, str],
                                  ground_truth: Dict[str, str],
                                  document_fields: List[str]) -> Dict[str, Any]:
        """Compute per-field and overall accuracy for one sample.

        Fields whose ground-truth value normalizes to empty are excluded from
        the overall average but are still reported per field.
        """
        field_accuracies = {}
        total_accuracy = 0
        valid_fields = 0

        for field in document_fields:
            pred_val = predictions.get(field, "未找到")
            true_val = ground_truth.get(field, "未找到")

            accuracy = self._calculate_field_accuracy(pred_val, true_val)
            field_accuracies[field] = accuracy

            # Only fields with a real ground-truth value count toward the mean.
            if self._normalize_text(true_val):
                total_accuracy += accuracy
                valid_fields += 1

        overall_accuracy = total_accuracy / valid_fields if valid_fields > 0 else 0

        return {
            'field_accuracies': field_accuracies,
            'overall_accuracy': overall_accuracy,
            'valid_fields_count': valid_fields
        }

    def evaluate_predictions(self, predictions_file: str,
                             ground_truth_file: str) -> Dict[str, Any]:
        """Evaluate prediction results against ground-truth labels.

        Args:
            predictions_file: Prediction results file (JSON).
            ground_truth_file: Ground-truth labels file (JSON).

        Returns:
            Evaluation result dict with timestamp, sample count, overall,
            per-sample and per-field statistics.
        """
        logger.info("开始评估模型性能...")

        with open(predictions_file, 'r', encoding='utf-8') as f:
            predictions = json.load(f)

        with open(ground_truth_file, 'r', encoding='utf-8') as f:
            ground_truth = json.load(f)

        # Index ground truth by image stem so predictions match regardless of
        # directory or file extension.
        gt_dict = {}
        for item in ground_truth:
            image_name = Path(item.get('image_name', '')).stem
            if not image_name:
                continue
            gt_dict[image_name] = item

        sample_results = []
        field_accuracies = {field: [] for field in self.document_fields}

        for pred_item in predictions:
            image_name = Path(pred_item.get('image_path', '')).stem

            if image_name not in gt_dict:
                logger.warning(f"未找到对应的真实标签: {image_name}")
                continue

            pred_fields = pred_item.get('extracted_fields', {})
            true_fields = gt_dict[image_name]

            metrics = self._calculate_sample_metrics(pred_fields, true_fields, self.document_fields)

            sample_results.append({
                'image_name': image_name,
                'overall_accuracy': metrics['overall_accuracy'],
                'valid_fields_count': metrics['valid_fields_count'],
                'field_accuracies': metrics['field_accuracies'],
                'inference_time': pred_item.get('inference_time_seconds', 0)
            })

            # Accumulate per-field accuracies across samples.
            for field, accuracy in metrics['field_accuracies'].items():
                field_accuracies[field].append(accuracy)

        overall_stats = self._calculate_overall_statistics(sample_results, field_accuracies)

        evaluation_result = {
            'timestamp': datetime.now().isoformat(),
            'total_samples': len(sample_results),
            'overall_statistics': overall_stats,
            'sample_results': sample_results,
            'field_statistics': self._calculate_field_statistics(field_accuracies)
        }

        logger.info(f"评估完成，处理了 {len(sample_results)} 个样本")
        return evaluation_result

    def _calculate_overall_statistics(self, sample_results: List[Dict],
                                      field_accuracies: Dict[str, List]) -> Dict[str, float]:
        """Aggregate statistics across all evaluated samples.

        Returns an empty dict when there are no samples; callers must handle
        the empty case.
        """
        if not sample_results:
            return {}

        overall_accuracies = [r['overall_accuracy'] for r in sample_results]
        inference_times = [r['inference_time'] for r in sample_results]

        # Flatten per-field accuracy lists for field-level aggregates.
        all_field_accuracies = []
        for field_acc_list in field_accuracies.values():
            all_field_accuracies.extend(field_acc_list)

        # Precision/recall/F1 based on binarized accuracy (>= 0.8 is a hit).
        # In this setup precision and recall coincide by construction.
        binary_accuracies = [1 if acc >= 0.8 else 0 for acc in all_field_accuracies]
        precision = np.mean(binary_accuracies) if binary_accuracies else 0
        recall = precision
        # Local name 'f1' avoids shadowing sklearn's imported f1_score.
        f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

        # Count each threshold once instead of rescanning per dict entry.
        perfect_count = sum(1 for acc in overall_accuracies if acc >= 0.95)
        good_count = sum(1 for acc in overall_accuracies if acc >= 0.8)
        n_samples = len(overall_accuracies)  # > 0 thanks to the early return

        return {
            'mean_overall_accuracy': np.mean(overall_accuracies),
            'std_overall_accuracy': np.std(overall_accuracies),
            'median_overall_accuracy': np.median(overall_accuracies),
            'mean_field_accuracy': np.mean(all_field_accuracies) if all_field_accuracies else 0,
            'std_field_accuracy': np.std(all_field_accuracies) if all_field_accuracies else 0,
            'precision': precision,
            'recall': recall,
            'f1_score': f1,
            'mean_inference_time': np.mean(inference_times),
            'std_inference_time': np.std(inference_times),
            'median_inference_time': np.median(inference_times),
            'samples_with_perfect_extraction': perfect_count,
            'samples_with_good_extraction': good_count,
            'perfect_extraction_rate': perfect_count / n_samples,
            'good_extraction_rate': good_count / n_samples
        }

    def _calculate_field_statistics(self, field_accuracies: Dict[str, List]) -> Dict[str, Dict[str, float]]:
        """Compute per-field accuracy statistics (mean/std/perfect rate)."""
        field_stats = {}

        for field, accuracies in field_accuracies.items():
            if not accuracies:
                # Field never evaluated: report zeroed statistics.
                field_stats[field] = {
                    'mean_accuracy': 0.0,
                    'std_accuracy': 0.0,
                    'perfect_count': 0,
                    'perfect_rate': 0.0,
                    'total_samples': 0
                }
                continue

            perfect_count = sum(1 for acc in accuracies if acc >= 0.95)

            field_stats[field] = {
                'mean_accuracy': np.mean(accuracies),
                'std_accuracy': np.std(accuracies),
                'perfect_count': perfect_count,
                'perfect_rate': perfect_count / len(accuracies),
                'total_samples': len(accuracies)
            }

        return field_stats

    def save_evaluation_report(self, evaluation_result: Dict[str, Any],
                               output_prefix: str = "evaluation") -> str:
        """Write JSON / text / Excel reports; return the JSON file path."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Full machine-readable result.
        json_file = self.output_dir / f"{output_prefix}_{timestamp}.json"
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(evaluation_result, f, ensure_ascii=False, indent=2)

        # Human-readable summary.
        report_file = self.output_dir / f"{output_prefix}_report_{timestamp}.txt"
        self._generate_text_report(evaluation_result, report_file)

        # Spreadsheet with per-sample details.
        excel_file = self.output_dir / f"{output_prefix}_details_{timestamp}.xlsx"
        self._generate_excel_report(evaluation_result, excel_file)

        logger.info("评估报告已保存:")
        logger.info(f"  JSON文件: {json_file}")
        logger.info(f"  文本报告: {report_file}")
        logger.info(f"  Excel报告: {excel_file}")

        return str(json_file)

    def _generate_text_report(self, evaluation_result: Dict[str, Any], output_file: Path):
        """Generate the plain-text evaluation report."""
        stats = evaluation_result['overall_statistics']
        field_stats = evaluation_result['field_statistics']
        sample_results = evaluation_result['sample_results']

        with open(output_file, 'w', encoding='utf-8') as f:
            f.write("关键字段信息抽取 - 评估报告\n")
            f.write("=" * 50 + "\n\n")

            f.write(f"评估时间: {evaluation_result['timestamp']}\n")
            f.write(f"测试样本数: {evaluation_result['total_samples']}\n")
            f.write(f"文档类型: {self.document_type}\n")
            f.write(f"评估字段数: {len(self.document_fields)}\n\n")

            # With zero matched samples the statistics dict is empty; bail out
            # instead of raising KeyError on the metric lookups below.
            if not stats:
                f.write("没有可评估的样本\n")
                return

            # Overall performance metrics.
            f.write("🔍 整体性能指标\n")
            f.write("-" * 30 + "\n")
            f.write(f"平均整体准确率: {stats['mean_overall_accuracy']:.3f} ± {stats['std_overall_accuracy']:.3f}\n")
            f.write(f"中位数准确率: {stats['median_overall_accuracy']:.3f}\n")
            f.write(f"平均字段准确率: {stats['mean_field_accuracy']:.3f} ± {stats['std_field_accuracy']:.3f}\n")
            f.write(f"精确率: {stats['precision']:.3f}\n")
            f.write(f"召回率: {stats['recall']:.3f}\n")
            f.write(f"F1分数: {stats['f1_score']:.3f}\n")
            f.write(f"完美提取率 (≥95%): {stats['perfect_extraction_rate']:.3f}\n")
            f.write(f"良好提取率 (≥80%): {stats['good_extraction_rate']:.3f}\n")
            f.write(f"平均推理时间: {stats['mean_inference_time']:.2f}s ± {stats['std_inference_time']:.2f}s\n")
            f.write(f"中位数推理时间: {stats['median_inference_time']:.2f}s\n\n")

            # Field-level performance.
            f.write("📋 字段级别性能\n")
            f.write("-" * 30 + "\n")
            for field, field_stat in field_stats.items():
                f.write(f"{field}:\n")
                f.write(f"  准确率: {field_stat['mean_accuracy']:.3f} ± {field_stat['std_accuracy']:.3f}\n")
                f.write(f"  完美提取: {field_stat['perfect_count']}/{field_stat['total_samples']} "
                       f"({field_stat['perfect_rate']:.3f})\n")

            # Error analysis section.
            f.write("\n❌ 错误分析\n")
            f.write("-" * 30 + "\n")
            self._analyze_errors(sample_results, f)

            f.write("\n" + "=" * 50 + "\n")

    def _analyze_errors(self, sample_results: List[Dict], file_handle):
        """Write error-pattern analysis to the open report file handle."""
        # Samples with low overall accuracy.
        low_accuracy_samples = [s for s in sample_results if s['overall_accuracy'] < 0.5]

        file_handle.write(f"低准确率样本数 (<50%): {len(low_accuracy_samples)}\n")

        if low_accuracy_samples:
            file_handle.write("最常见的低准确率样本特征:\n")
            # Count which fields fail most often within low-accuracy samples.
            field_error_counts = {}
            for sample in low_accuracy_samples:
                for field, accuracy in sample['field_accuracies'].items():
                    if accuracy < 0.5:
                        field_error_counts[field] = field_error_counts.get(field, 0) + 1

            # Top-5 fields by error frequency.
            sorted_errors = sorted(field_error_counts.items(), key=lambda x: x[1], reverse=True)
            for field, count in sorted_errors[:5]:
                error_rate = count / len(low_accuracy_samples) if len(low_accuracy_samples) > 0 else 0
                file_handle.write(f"  {field}: {count}次 ({error_rate:.1%})\n")

        # Samples with abnormally long inference time (> mean + 2 std).
        inference_times = [s['inference_time'] for s in sample_results]
        if inference_times:
            mean_time = np.mean(inference_times)
            std_time = np.std(inference_times)
            slow_samples = [s for s in sample_results if s['inference_time'] > mean_time + 2 * std_time]

            file_handle.write(f"\n推理时间异常样本数 (>μ+2σ): {len(slow_samples)}\n")

            if slow_samples:
                avg_slow_time = np.mean([s['inference_time'] for s in slow_samples])
                file_handle.write(f"异常样本平均推理时间: {avg_slow_time:.2f}s\n")
        else:
            file_handle.write("\n推理时间异常样本数 (>μ+2σ): 0\n")

    def _generate_excel_report(self, evaluation_result: Dict[str, Any], output_file: Path):
        """Generate the detailed Excel report (requires openpyxl)."""
        with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
            # Sheet 1: overall statistics.
            overall_stats = pd.DataFrame([evaluation_result['overall_statistics']])
            overall_stats.to_excel(writer, sheet_name='整体统计', index=False)

            # Sheet 2: per-field statistics (fields as rows).
            field_stats_df = pd.DataFrame(evaluation_result['field_statistics']).T
            field_stats_df.to_excel(writer, sheet_name='字段统计')

            # Sheet 3: per-sample details including each field's accuracy.
            samples_data = []
            for sample in evaluation_result['sample_results']:
                row = {
                    '图像名称': sample['image_name'],
                    '整体准确率': sample['overall_accuracy'],
                    '有效字段数': sample['valid_fields_count'],
                    '推理时间(秒)': sample['inference_time']
                }
                for field, accuracy in sample['field_accuracies'].items():
                    row[f'{field}_准确率'] = accuracy
                samples_data.append(row)

            samples_df = pd.DataFrame(samples_data)
            samples_df.to_excel(writer, sheet_name='样本详情', index=False)

    def plot_evaluation_results(self, evaluation_result: Dict[str, Any]):
        """Plot the evaluation result charts and save them as PNG."""
        # Prefer CJK-capable fonts so the Chinese labels render; matplotlib
        # falls back through the list, ending at DejaVu Sans.
        plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'PingFang SC',
                                           'Noto Sans CJK SC', 'DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False

        fig, axes = plt.subplots(2, 3, figsize=(20, 12))

        # 1. Overall accuracy distribution.
        overall_accuracies = [r['overall_accuracy'] for r in evaluation_result['sample_results']]
        axes[0, 0].hist(overall_accuracies, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
        axes[0, 0].set_title('整体准确率分布')
        axes[0, 0].set_xlabel('准确率')
        axes[0, 0].set_ylabel('样本数')

        # 2. Mean accuracy per field.
        field_stats = evaluation_result['field_statistics']
        fields = list(field_stats.keys())
        field_accuracies = [field_stats[field]['mean_accuracy'] for field in fields]

        axes[0, 1].bar(range(len(fields)), field_accuracies, color='lightgreen', alpha=0.7)
        axes[0, 1].set_title('各字段平均准确率')
        axes[0, 1].set_xlabel('字段')
        axes[0, 1].set_ylabel('平均准确率')
        axes[0, 1].set_xticks(range(len(fields)))
        axes[0, 1].set_xticklabels(fields, rotation=45, ha='right')

        # 3. Inference time distribution.
        inference_times = [r['inference_time'] for r in evaluation_result['sample_results']]
        axes[0, 2].hist(inference_times, bins=20, alpha=0.7, color='orange', edgecolor='black')
        axes[0, 2].set_title('推理时间分布')
        axes[0, 2].set_xlabel('推理时间 (秒)')
        axes[0, 2].set_ylabel('样本数')

        # 4. Perfect-extraction rate per field.
        perfect_rates = [field_stats[field]['perfect_rate'] for field in fields]
        axes[1, 0].bar(range(len(fields)), perfect_rates, color='coral', alpha=0.7)
        axes[1, 0].set_title('各字段完美提取率 (≥95%)')
        axes[1, 0].set_xlabel('字段')
        axes[1, 0].set_ylabel('完美提取率')
        axes[1, 0].set_xticks(range(len(fields)))
        axes[1, 0].set_xticklabels(fields, rotation=45, ha='right')

        # 5. Accuracy vs. inference time scatter.
        axes[1, 1].scatter(inference_times, overall_accuracies, alpha=0.6, color='purple')
        axes[1, 1].set_title('准确率 vs 推理时间')
        axes[1, 1].set_xlabel('推理时间 (秒)')
        axes[1, 1].set_ylabel('整体准确率')

        # 6. Accuracy standard deviation per field (stability).
        field_std_devs = [field_stats[field]['std_accuracy'] for field in fields]
        axes[1, 2].bar(range(len(fields)), field_std_devs, color='red', alpha=0.7)
        axes[1, 2].set_title('各字段准确率标准差')
        axes[1, 2].set_xlabel('字段')
        axes[1, 2].set_ylabel('标准差')
        axes[1, 2].set_xticks(range(len(fields)))
        axes[1, 2].set_xticklabels(fields, rotation=45, ha='right')

        plt.tight_layout()

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        plot_file = self.output_dir / f"evaluation_plots_{timestamp}.png"
        plt.savefig(plot_file, dpi=300, bbox_inches='tight')
        logger.info(f"评估图表已保存: {plot_file}")

        plt.show()

        # Additional per-field accuracy boxplot.
        self._plot_boxplot(evaluation_result)

    def _plot_boxplot(self, evaluation_result: Dict[str, Any]):
        """Plot and save a boxplot of per-field accuracy distributions."""
        field_stats = evaluation_result['field_statistics']
        fields = list(field_stats.keys())

        # Collect each field's accuracies from the per-sample results.
        field_accuracies_data = {field: [] for field in fields}
        for sample in evaluation_result['sample_results']:
            for field, accuracy in sample['field_accuracies'].items():
                field_accuracies_data[field].append(accuracy)

        plt.figure(figsize=(15, 8))
        data = [field_accuracies_data[field] for field in fields]
        plt.boxplot(data, labels=fields)
        plt.title('各字段准确率分布箱线图')
        plt.xlabel('字段')
        plt.ylabel('准确率')
        plt.xticks(rotation=45, ha='right')
        plt.tight_layout()

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        boxplot_file = self.output_dir / f"field_accuracy_boxplot_{timestamp}.png"
        plt.savefig(boxplot_file, dpi=300, bbox_inches='tight')
        logger.info(f"字段准确率箱线图已保存: {boxplot_file}")

        plt.close()


def main():
    """CLI entry point: evaluate predictions against ground-truth labels."""
    parser = argparse.ArgumentParser(description="通用信息抽取模型评估")
    parser.add_argument('--predictions', required=True, help='预测结果文件路径')
    parser.add_argument('--ground_truth', required=True, help='真实标签文件路径')
    parser.add_argument('--config', default='config/config.yaml', help='配置文件路径')
    parser.add_argument('--document_type', default='contract',
                       choices=['contract', 'invoice', 'resume', 'medical_report', 'id_card'],
                       help='文档类型')
    parser.add_argument('--output_prefix', default='evaluation', help='输出文件前缀')
    parser.add_argument('--no_plots', action='store_true', help='不生成图表')
    parser.add_argument('--gpu_id', type=int, default=None, help='指定GPU ID（用于推理时）')

    args = parser.parse_args()

    # Restrict visible GPUs before any CUDA-using library initializes.
    if args.gpu_id is not None:
        import os
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
        logger.info(f"🔧 已设置CUDA_VISIBLE_DEVICES={args.gpu_id}")

    try:
        # Build the evaluator and run the evaluation.
        evaluator = UniversalDocumentEvaluator(args.config, args.document_type)
        results = evaluator.evaluate_predictions(args.predictions, args.ground_truth)

        logger.info(f"📄 评估文档类型: {args.document_type}")
        logger.info(f"🏷️  评估字段数: {len(evaluator.document_fields)}")

        # Persist JSON / text / Excel reports.
        evaluator.save_evaluation_report(results, args.output_prefix)

        # Charts are best-effort (e.g. headless hosts without a display).
        if not args.no_plots:
            try:
                evaluator.plot_evaluation_results(results)
            except Exception as e:
                logger.warning(f"生成图表失败: {e}")

        # Use .get with defaults: overall_statistics is an empty dict when no
        # prediction matched any ground-truth sample.
        stats = results['overall_statistics']
        print(f"\n📊 评估摘要:")
        print(f"  测试样本数: {results['total_samples']}")
        print(f"  平均准确率: {stats.get('mean_overall_accuracy', 0):.3f}")
        print(f"  完美提取率: {stats.get('perfect_extraction_rate', 0):.3f}")
        print(f"  良好提取率: {stats.get('good_extraction_rate', 0):.3f}")
        print(f"  平均推理时间: {stats.get('mean_inference_time', 0):.2f}秒")

    except Exception as e:
        logger.error(f"评估失败: {e}")
        sys.exit(1)


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()