"""
结果分析器

负责保存验证结果、分析结果并生成报告
"""

import json
import logging
import os
import re
from collections import defaultdict
from dataclasses import asdict, is_dataclass
from datetime import datetime
from typing import Dict, List, Any, Optional

# Module-level logger for this analyzer module
logger = logging.getLogger(__name__)

class ResultAnalyzer:
    """结果分析器"""
    
    def __init__(self, output_dir: str = "data/validation_results"):
        """
        Initialize the analyzer and ensure the output directory exists.

        Args:
            output_dir: Directory where result files and reports are written.
        """
        # Create the directory eagerly so later writes cannot fail on a
        # missing path.
        os.makedirs(output_dir, exist_ok=True)
        self.output_dir = output_dir

        # One timestamp shared by every file this instance produces, so all
        # artifacts of a run sort together.
        self.timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        logger.info(f"初始化结果分析器，输出目录: {output_dir}")
    
    def save_validation_results(self, results: List[Dict], filename: str = None) -> str:
        """
        Save validation results to a JSONL file (one JSON object per line).

        Args:
            results: List of validation results; each entry may be a plain
                dict or a dataclass instance (e.g. ValidationResult).
            filename: File name; a timestamped default is generated when None.

        Returns:
            Path of the written file.

        Raises:
            Exception: Re-raised after logging when writing fails.
        """
        if filename is None:
            filename = f"validation_results_{self.timestamp}.jsonl"

        file_path = os.path.join(self.output_dir, filename)

        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                for result in results:
                    # Convert dataclass instances to dicts. The previous
                    # hasattr(result, '__dict__') check was wrong: asdict()
                    # raises TypeError for non-dataclass objects that happen
                    # to have a __dict__.
                    if is_dataclass(result) and not isinstance(result, type):
                        result_dict = asdict(result)
                    else:
                        result_dict = result

                    # Backfill newer schema fields so older records stay
                    # compatible with downstream consumers.
                    if 'processed_output' not in result_dict:
                        result_dict['processed_output'] = result_dict.get('model_output', '')
                    if 'think_content' not in result_dict:
                        result_dict['think_content'] = ''

                    f.write(json.dumps(result_dict, ensure_ascii=False) + '\n')

            logger.info(f"验证结果已保存到: {file_path}")
            return file_path

        except Exception as e:
            logger.error(f"保存验证结果失败: {e}")
            raise
    
    def analyze_evaluation_results(self, results: List[Dict]) -> Dict[str, Any]:
        """
        Analyze evaluation results and aggregate per-prompt-type statistics.

        Counts successes/failures per prompt type (few_shot / zero_shot),
        extracts scores and validity judgements from the model's JSON output
        (with a regex fallback), collects the pre-label distribution shared by
        both prompt types, and computes success rates, score statistics and
        average execution times.

        Args:
            results: List of validation result dicts.

        Returns:
            Analysis dict with 'total_samples', 'evaluation_results' and
            'performance_comparison' sections. NOTE: 'label_names' values are
            Python sets, not JSON-serializable as-is.
        """
        analysis = {
            'total_samples': 0,
            'evaluation_results': {
                'few_shot': {
                    'success': 0, 'failed': 0, 'scores': [],
                    'valid_count': 0, 'invalid_count': 0,
                    'label_distribution': {}, 'label_names': set()
                },
                'zero_shot': {
                    'success': 0, 'failed': 0, 'scores': [],
                    'valid_count': 0, 'invalid_count': 0,
                    'label_distribution': {}, 'label_names': set()
                }
            },
            'performance_comparison': {
                'evaluation_time': {'few_shot': [], 'zero_shot': []},
            }
        }

        # Label distribution comes from the pre-labels and is shared by all
        # prompt types.
        label_distribution = {}
        label_names = set()

        for result in results:
            if not isinstance(result, dict):
                continue

            # Collect pre-label information.
            input_content = result.get('input_content', {})
            if isinstance(input_content, dict):
                pre_label = input_content.get('pre_label', {})
                if isinstance(pre_label, dict):
                    label_id = pre_label.get('label_id')
                    label_name = pre_label.get('label_name')
                    if label_id is not None:
                        label_distribution[str(label_id)] = label_distribution.get(str(label_id), 0) + 1
                    if label_name:
                        label_names.add(label_name)

            analysis['total_samples'] += 1

            # Aggregate evaluation outcomes.
            if result.get('task_type') == 'evaluation':
                prompt_type = result.get('prompt_type', 'unknown')
                if prompt_type in ['few_shot', 'zero_shot']:
                    if result.get('success'):
                        analysis['evaluation_results'][prompt_type]['success'] += 1

                        # Prefer processed_output, falling back to model_output.
                        try:
                            output_text = result.get('processed_output', '') or result.get('model_output', '')
                            if output_text:
                                # Try to parse an embedded JSON object.
                                if '{' in output_text and '}' in output_text:
                                    json_start = output_text.find('{')
                                    json_end = output_text.rfind('}') + 1
                                    if json_start != -1 and json_end > json_start:
                                        json_str = output_text[json_start:json_end]
                                        parsed = json.loads(json_str)

                                        # Extract the score. A score of 0 is
                                        # valid, so check the type instead of
                                        # truthiness (the old `if score and ...`
                                        # silently dropped zeros); bool is
                                        # excluded because it subclasses int.
                                        score = parsed.get('score')
                                        if isinstance(score, (int, float)) and not isinstance(score, bool):
                                            # Only accept scores in the 0-5 range.
                                            if 0 <= score <= 5:
                                                analysis['evaluation_results'][prompt_type]['scores'].append(score)
                                                logger.debug(f"成功提取评分: {score}")
                                            else:
                                                logger.warning(f"评分超出范围0-5: {score}")
                                        else:
                                            logger.debug(f"未找到有效评分字段，JSON内容: {parsed}")

                                        # Extract the validity judgement.
                                        is_valid = parsed.get('is_valid')
                                        if is_valid is not None:
                                            if is_valid:
                                                analysis['evaluation_results'][prompt_type]['valid_count'] += 1
                                            else:
                                                analysis['evaluation_results'][prompt_type]['invalid_count'] += 1
                                            logger.debug(f"提取有效性判断: {is_valid}")

                                        # Label info comes from the pre-labels,
                                        # not extracted here.

                                else:
                                    logger.debug(f"输出中未找到JSON格式: {output_text[:100]}...")
                            else:
                                logger.debug("输出内容为空")
                        except Exception as e:
                            logger.debug(f"解析评估结果失败: {e}")
                            # Fall back to regex extraction from raw text.
                            try:
                                output_text = result.get('processed_output', '') or result.get('model_output', '')

                                # Try to extract the score.
                                score_match = re.search(r'"score"\s*:\s*(\d+)', output_text)
                                if score_match:
                                    score = int(score_match.group(1))
                                    if 0 <= score <= 5:
                                        analysis['evaluation_results'][prompt_type]['scores'].append(score)
                                        logger.debug(f"通过正则提取评分: {score}")

                                # Try to extract the validity judgement.
                                valid_match = re.search(r'"is_valid"\s*:\s*(true|false)', output_text, re.IGNORECASE)
                                if valid_match:
                                    is_valid = valid_match.group(1).lower() == 'true'
                                    if is_valid:
                                        analysis['evaluation_results'][prompt_type]['valid_count'] += 1
                                    else:
                                        analysis['evaluation_results'][prompt_type]['invalid_count'] += 1
                                    logger.debug(f"通过正则提取有效性判断: {is_valid}")

                            except Exception as regex_error:
                                logger.debug(f"正则提取失败: {regex_error}")

                        # Record execution time. Check against None so a
                        # legitimate 0.0 duration is still recorded (the old
                        # truthiness check dropped it).
                        execution_time = result.get('execution_time')
                        if execution_time is not None:
                            analysis['performance_comparison']['evaluation_time'][prompt_type].append(execution_time)
                    else:
                        analysis['evaluation_results'][prompt_type]['failed'] += 1

        # Derive summary statistics.
        for prompt_type in ['few_shot', 'zero_shot']:
            # Success rate (only when at least one evaluation was seen).
            eval_total = (analysis['evaluation_results'][prompt_type]['success'] +
                         analysis['evaluation_results'][prompt_type]['failed'])
            if eval_total > 0:
                analysis['evaluation_results'][prompt_type]['success_rate'] = (
                    analysis['evaluation_results'][prompt_type]['success'] / eval_total * 100
                )

            # Score statistics.
            scores = analysis['evaluation_results'][prompt_type]['scores']
            if scores:
                analysis['evaluation_results'][prompt_type]['avg_score'] = sum(scores) / len(scores)
                analysis['evaluation_results'][prompt_type]['min_score'] = min(scores)
                analysis['evaluation_results'][prompt_type]['max_score'] = max(scores)

            # Label distribution (shared between prompt types).
            analysis['evaluation_results'][prompt_type]['label_distribution'] = label_distribution.copy()
            analysis['evaluation_results'][prompt_type]['label_names'] = label_names.copy()

            # Average execution time.
            for task_type in ['evaluation_time']:
                times = analysis['performance_comparison'][task_type][prompt_type]
                if times:
                    analysis['performance_comparison'][task_type][f'{prompt_type}_avg'] = sum(times) / len(times)

        return analysis
    
    def generate_analysis_report(self, analysis: Dict[str, Any], filename: str = None) -> str:
        """
        Render the analysis dict as a human-readable text report.

        Args:
            analysis: Output of analyze_evaluation_results.
            filename: Report file name; a timestamped default is used when None.

        Returns:
            Path of the written report file.

        Raises:
            Exception: Re-raised after logging when report generation fails.
        """
        if filename is None:
            filename = f"analysis_report_{self.timestamp}.txt"

        file_path = os.path.join(self.output_dir, filename)

        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                emit = f.write

                # Header.
                emit("数据集验证结果分析报告\n")
                emit(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                emit("="*80 + "\n\n")

                # Overall statistics.
                emit("## 总体统计\n")
                emit(f"总样本数: {analysis['total_samples']}\n\n")

                # Per-prompt-type evaluation comparison.
                emit("## 评估结果对比\n")
                for variant in ['few_shot', 'zero_shot']:
                    emit(f"\n### {variant.upper()}\n")
                    stats = analysis['evaluation_results'][variant]
                    emit(f"  成功: {stats['success']}\n")
                    emit(f"  失败: {stats['failed']}\n")
                    if 'success_rate' in stats:
                        emit(f"  成功率: {stats['success_rate']:.1f}%\n")

                    # Validity breakdown.
                    if 'valid_count' in stats and 'invalid_count' in stats:
                        judged = stats['valid_count'] + stats['invalid_count']
                        if judged > 0:
                            valid_pct = stats['valid_count'] / judged * 100
                            emit(f"  有效修改: {stats['valid_count']} ({valid_pct:.1f}%)\n")
                            emit(f"  无效修改: {stats['invalid_count']} ({100-valid_pct:.1f}%)\n")

                    # Score statistics.
                    if 'avg_score' in stats:
                        emit(f"  平均评分: {stats['avg_score']:.2f}\n")
                        emit(f"  评分范围: {stats['min_score']} - {stats['max_score']}\n")

                    # Label distribution.
                    if 'label_distribution' in stats and stats['label_distribution']:
                        emit(f"  标签分布: {dict(stats['label_distribution'])}\n")

                    # Label names involved.
                    if 'label_names' in stats and stats['label_names']:
                        emit(f"  涉及标签: {', '.join(sorted(stats['label_names']))}\n")

                # Performance comparison.
                emit("\n## 性能对比\n")
                perf = analysis['performance_comparison']

                emit("\n### 评估任务执行时间\n")
                for variant in ['few_shot', 'zero_shot']:
                    avg_key = f'{variant}_avg'
                    if avg_key in perf['evaluation_time']:
                        emit(f"  {variant}: {perf['evaluation_time'][avg_key]:.3f}秒\n")

            logger.info(f"分析报告已生成: {file_path}")
            return file_path

        except Exception as e:
            logger.error(f"生成分析报告失败: {e}")
            raise
    
    def create_comparison_data(self, results: List[Dict]) -> Dict[str, Any]:
        """
        Build a per-sample comparison structure from raw validation results.

        Results are grouped by sample_id; each sample entry carries its source
        content plus the extracted few-shot / zero-shot evaluation outcomes.
        Aggregate task performance is filled in at the end.

        Args:
            results: List of validation result dicts.

        Returns:
            Dict with 'summary', 'sample_comparisons' and 'task_performance'.
        """
        comparison: Dict[str, Any] = {
            'summary': {
                'total_samples': 0,
                'evaluation_tasks': 0,
            },
            'sample_comparisons': [],
            'task_performance': {
                'evaluation': {
                    'few_shot': {'success_count': 0, 'failed_count': 0, 'avg_score': 0, 'avg_time': 0},
                    'zero_shot': {'success_count': 0, 'failed_count': 0, 'avg_score': 0, 'avg_time': 0}
                },
            }
        }

        # Group results by sample id; key is "<task_type>_<prompt_type>".
        grouped: Dict[str, Dict[str, Dict]] = {}
        for item in results:
            if not isinstance(item, dict):
                continue
            task_key = f"{item.get('task_type', 'unknown')}_{item.get('prompt_type', 'unknown')}"
            grouped.setdefault(item.get('sample_id', 'unknown'), {})[task_key] = item

        # Build one comparison entry per sample.
        for sid, per_sample in grouped.items():
            # The first result carrying input_content provides the sample's
            # source information.
            source = next(
                (r['input_content'] for r in per_sample.values() if r and r.get('input_content')),
                None,
            )

            def _field(name: str) -> str:
                # Empty string when the sample has no source info at all.
                return source.get(name, '') if source else ''

            comparison['sample_comparisons'].append({
                'sample_id': sid,
                # Original change-content information.
                'sample_content': {
                    'file_path': _field('file_path'),
                    'remove_content': _field('remove_content'),
                    'add_content': _field('add_content'),
                    'context_before': _field('context_before'),
                    'context_after': _field('context_after'),
                    'change_type': _field('change_type')
                },
                'evaluation_comparison': {
                    'few_shot': self._extract_evaluation_result(per_sample.get('evaluation_few_shot', {})),
                    'zero_shot': self._extract_evaluation_result(per_sample.get('evaluation_zero_shot', {}))
                },
            })

        # Overall counts: each sample is evaluated twice (few-shot + zero-shot).
        comparison['summary']['total_samples'] = len(grouped)
        comparison['summary']['evaluation_tasks'] = len(grouped) * 2

        # Per-task performance aggregation.
        self._calculate_task_performance(comparison, grouped)

        return comparison
    
    def _extract_evaluation_result(self, result: Dict) -> Dict[str, Any]:
        """提取评估结果的关键信息，解析JSON输出"""
        if not result:
            return {'success': False, 'error': 'No result'}
        
        extracted = {
            'success': result.get('success', False),
            'execution_time': result.get('execution_time', 0),
            'error': result.get('error', '')
        }
        
        # 尝试解析 processed_output 中的 JSON
        processed_output = result.get('processed_output', '')
        if processed_output:
            try:
                # 清理可能的 markdown 格式
                json_str = processed_output.strip()
                if json_str.startswith('```json'):
                    json_str = json_str[7:]
                if json_str.startswith('```'):
                    json_str = json_str[3:]
                if json_str.endswith('```'):
                    json_str = json_str[:-3]
                json_str = json_str.strip()
                
                # 解析 JSON
                parsed_result = json.loads(json_str)
                
                # 添加解析后的字段
                extracted.update({
                    'is_valid': parsed_result.get('is_valid'),
                    'valid_reason': parsed_result.get('valid_reason', ''),
                    'score': parsed_result.get('score', 0),
                    'score_details': parsed_result.get('score_details', ''),
                })
                
            except (json.JSONDecodeError, Exception) as e:
                # JSON 解析失败，回退到原来的方法
                extracted.update({
                    'score': self._extract_score_from_output(processed_output),
                    'parse_error': f'JSON解析失败: {str(e)}',
                    'raw_output': processed_output
                })
        else:
            extracted['score'] = 0
        
        return extracted
    

    
    def _extract_score_from_output(self, output: str) -> float:
        """
        Pull a numeric "score" field out of raw model output via regex.

        Args:
            output: Raw model output text (may be None or non-string).

        Returns:
            The score as float, or 0.0 when no score is found or the input
            is not a string.
        """
        try:
            # `re` is already imported at module level; the old per-call
            # import and the bare `except:` were unnecessary.
            score_match = re.search(r'"score":\s*(\d+(?:\.\d+)?)', output)
        except TypeError:
            # Non-string input (e.g. None): treat as "no score".
            return 0.0
        if score_match:
            return float(score_match.group(1))
        return 0.0
    
    def _extract_reason_from_output(self, output: str) -> str:
        """
        Pull a "reason" string field out of raw model output via regex.

        Args:
            output: Raw model output text (may be None or non-string).

        Returns:
            The reason text, or "" when none is found or the input is not a
            string.
        """
        try:
            # Module-level `re` is used; the old per-call import and bare
            # `except:` were unnecessary.
            reason_match = re.search(r'"reason":\s*"([^"]+)"', output)
        except TypeError:
            # Non-string input (e.g. None): no reason available.
            return ""
        if reason_match:
            return reason_match.group(1)
        return ""
    
    def _calculate_task_performance(self, comparison_data: Dict, sample_groups: Dict):
        """
        Fill in per-prompt-type evaluation counters and averages in place.

        Args:
            comparison_data: Comparison payload whose 'task_performance'
                section is updated.
            sample_groups: Mapping of sample_id -> {task_key: result dict}.
        """
        collected_scores = {'few_shot': [], 'zero_shot': []}
        collected_times = {'few_shot': [], 'zero_shot': []}
        eval_perf = comparison_data['task_performance']['evaluation']

        for grouped in sample_groups.values():
            for ptype in ('few_shot', 'zero_shot'):
                outcome = grouped.get(f'evaluation_{ptype}', {})
                if not outcome.get('success'):
                    eval_perf[ptype]['failed_count'] += 1
                    continue
                eval_perf[ptype]['success_count'] += 1
                # Only positive scores contribute to the average.
                extracted = self._extract_score_from_output(outcome.get('processed_output', ''))
                if extracted > 0:
                    collected_scores[ptype].append(extracted)
                collected_times[ptype].append(outcome.get('execution_time', 0))

        # Averages are only written when there is data for them.
        for ptype in ('few_shot', 'zero_shot'):
            if collected_scores[ptype]:
                eval_perf[ptype]['avg_score'] = sum(collected_scores[ptype]) / len(collected_scores[ptype])
            if collected_times[ptype]:
                eval_perf[ptype]['avg_time'] = sum(collected_times[ptype]) / len(collected_times[ptype])

    
    def save_comparison_data(self, comparison_data: Dict[str, Any], filename: str = None) -> str:
        """
        Persist the comparison structure as pretty-printed JSON.

        Args:
            comparison_data: Payload built by create_comparison_data.
            filename: Optional file name; a timestamped default otherwise.

        Returns:
            Path of the written file.

        Raises:
            Exception: Re-raised after logging when writing fails.
        """
        target = os.path.join(
            self.output_dir,
            f"comparison_data_{self.timestamp}.json" if filename is None else filename,
        )

        try:
            with open(target, 'w', encoding='utf-8') as fh:
                json.dump(comparison_data, fh, ensure_ascii=False, indent=2)

            logger.info(f"对比数据已保存到: {target}")
            return target

        except Exception as exc:
            logger.error(f"保存对比数据失败: {exc}")
            raise
    
    def print_summary(self, analysis: Dict[str, Any]):
        """Print a short console summary of an analysis dict."""
        separator = "=" * 80
        print("\n" + separator)
        print("数据集验证结果摘要")
        print(separator)

        print(f"总样本数: {analysis['total_samples']}")

        # Success rates per prompt type (0.0 when not computed).
        print("\n评估任务成功率:")
        for variant in ('few_shot', 'zero_shot'):
            success_rate = analysis['evaluation_results'][variant].get('success_rate', 0)
            print(f"  {variant}: {success_rate:.1f}%")

        # Average scores, only when a positive average exists.
        print("\n评估质量对比:")
        for variant in ('few_shot', 'zero_shot'):
            mean_score = analysis['evaluation_results'][variant].get('avg_score', 0)
            if mean_score > 0:
                print(f"  {variant} 平均评分: {mean_score:.2f}")

        print(separator)

    def get_new_results_file_path(self, filename: str = None) -> str:
        """
        生成新的结果文件路径（不创建文件，仅返回路径）
        """
        if filename is None:
            filename = f"validation_results_{self.timestamp}.jsonl"
        return os.path.join(self.output_dir, filename)

    def append_validation_result(self, file_path: str, result: Dict) -> None:
        """
        以追加方式写入单条验证结果（JSONL一行）
        """
        try:
            # 将对象转为字典
            if hasattr(result, '__dict__'):
                from dataclasses import asdict
                result_dict = asdict(result)
            else:
                result_dict = result

            # 兜底补齐新字段
            if 'processed_output' not in result_dict:
                result_dict['processed_output'] = result_dict.get('model_output', '')
            if 'think_content' not in result_dict:
                result_dict['think_content'] = ''

            # 追加写入
            with open(file_path, 'a', encoding='utf-8') as f:
                f.write(json.dumps(result_dict, ensure_ascii=False) + '\n')
        except Exception as e:
            logger.error(f"追加写入验证结果失败: {e}")
            raise

    def load_results(self, file_path: str) -> List[Dict]:
        """
        读取JSONL结果文件为列表
        """
        loaded: List[Dict] = []
        try:
            if not os.path.exists(file_path):
                return loaded
            with open(file_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        loaded.append(json.loads(line))
                    except json.JSONDecodeError:
                        continue
            return loaded
        except Exception as e:
            logger.error(f"读取结果文件失败: {e}")
            raise

    # ========================= 新增：few-shot vs zero-shot 差异样本报告 =========================
    def generate_score_diff_report(self, results: List[Dict], filename: str = None, score_threshold: float = 0.0) -> str:
        """Generate a TXT report of samples where few-shot and zero-shot scores differ.

        For each differing sample the report shows:
        - file path and sample id
        - original document (context_before + remove_content + context_after)
        - modified document (context_before + add_content + context_after)
        - score / validity / brief reasoning for both few-shot and zero-shot

        Args:
            results: Result list produced by validate_sample (loaded from JSONL)
            filename: Output file name; defaults to a timestamped name
            score_threshold: Score-difference threshold (default 0: any non-zero difference is reported)
        Returns:
            Path of the generated report
        """
        if filename is None:
            filename = f"score_diff_report_{self.timestamp}.txt"
        out_path = os.path.join(self.output_dir, filename)

        # Group results by sample_id, keyed by prompt_type.
        groups: Dict[str, Dict[str, Dict]] = {}
        for r in results:
            if not isinstance(r, dict):
                continue
            if r.get('task_type') != 'evaluation':
                continue
            sid = r.get('sample_id', 'unknown')
            ptype = r.get('prompt_type', 'unknown')
            groups.setdefault(sid, {})[ptype] = r

        # Helper: regex-extract the numeric score from processed_output.
        def _score_of(res: Dict) -> float:
            return self._extract_score_from_output((res or {}).get('processed_output', ''))

        # Helper: best-effort parse of is_valid from the embedded JSON object.
        def _is_valid_of(res: Dict) -> Optional[bool]:
            txt = (res or {}).get('processed_output', '')
            try:
                import json as _json
                if '{' in txt and '}' in txt:
                    js = txt[txt.find('{'): txt.rfind('}')+1]
                    parsed = _json.loads(js)
                    return bool(parsed.get('is_valid')) if parsed.get('is_valid') is not None else None
            except Exception:
                pass
            return None

        with open(out_path, 'w', encoding='utf-8') as f:
            f.write("few-shot vs zero-shot 评分差异样本对比\n")
            f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write("="*80 + "\n\n")

            for sid, m in groups.items():
                fs = m.get('few_shot')
                zs = m.get('zero_shot')
                # Only samples evaluated by BOTH prompt types can be compared.
                if not fs or not zs:
                    continue
                s_fs = _score_of(fs)
                s_zs = _score_of(zs)
                if abs(s_fs - s_zs) <= score_threshold:
                    continue

                # Extract the sample's source content (either result may carry it).
                sample = fs.get('input_content') or zs.get('input_content') or {}
                file_path = (sample.get('file_path') or 'unknown_file')
                context_before = (sample.get('context_before') or '').strip()
                remove_content = (sample.get('remove_content') or '').strip()
                add_content = (sample.get('add_content') or '').strip()
                context_after = (sample.get('context_after') or '').strip()

                # Assemble the display text for the original document.
                original_doc = ""
                if context_before:
                    original_doc += context_before + "\n"
                if remove_content:
                    original_doc += remove_content + "\n"
                if context_after:
                    original_doc += context_after + "\n"

                modified_doc = ""
                if context_before:
                    modified_doc += context_before + "\n"
                if add_content:
                    modified_doc += add_content + "\n"
                if context_after:
                    modified_doc += context_after + "\n"

                # Validity and reasoning (reasons truncated to 300 chars each).
                valid_fs = _is_valid_of(fs)
                valid_zs = _is_valid_of(zs)
                reason_fs = self._extract_reason_from_output(fs.get('processed_output', ''))[:300]
                reason_zs = self._extract_reason_from_output(zs.get('processed_output', ''))[:300]

                f.write(f"样本: {sid}\n")
                f.write(f"文件: {file_path}\n")
                f.write(f"few-shot 分数: {s_fs:.2f} | zero-shot 分数: {s_zs:.2f} | 差值: {s_fs - s_zs:+.2f}\n")
                if valid_fs is not None or valid_zs is not None:
                    f.write(f"有效性: few-shot={valid_fs} | zero-shot={valid_zs}\n")
                f.write("-"*60 + "\n")
                f.write("【原始文档】\n")
                f.write((original_doc or "<空>") + "\n")
                f.write("【修改后文档】\n")
                f.write((modified_doc or "<空>") + "\n")
                f.write("【评估对比】\n")
                f.write(f"few-shot 理由: {reason_fs or '<无>'}\n")
                f.write(f"zero-shot 理由: {reason_zs or '<无>'}\n")
                f.write("="*80 + "\n\n")

        logger.info(f"评分差异对比报告已生成: {out_path}")
        return out_path


def _find_latest_results_file(default_dir: str = "data/validation_results") -> Optional[str]:
    """在默认目录中查找最新的 validation_results_*.jsonl 文件"""
    try:
        if not os.path.exists(default_dir):
            return None
        candidates = []
        for name in os.listdir(default_dir):
            if name.startswith("validation_results_") and name.endswith(".jsonl"):
                path = os.path.join(default_dir, name)
                candidates.append((os.path.getmtime(path), path))
        if not candidates:
            return None
        candidates.sort(key=lambda x: x[0], reverse=True)
        return candidates[0][1]
    except Exception:
        return None


if __name__ == "__main__":
    import argparse

    # CLI entry point: load a results file, produce the standard analysis
    # artifacts and, optionally, the few-shot vs zero-shot diff report.
    cli = argparse.ArgumentParser(description="验证结果分析器 - 读取结果并生成报告/对比数据/评分差异对比")
    cli.add_argument("--results-file", "-r", type=str, default=None,
                     help="结果文件路径（JSONL）。缺省时自动从 data/validation_results 中选择最新文件")
    cli.add_argument("--out-dir", "-o", type=str, default="data/validation_results",
                     help="分析输出目录")
    cli.add_argument("--gen-score-diff", action="store_true",
                     help="同时生成 few-shot vs zero-shot 评分差异对比报告")
    cli.add_argument("--score-threshold", type=float, default=0.0,
                     help="仅当两者分数差绝对值大于该阈值时才输出对比，默认0（不相等即输出）")
    opts = cli.parse_args()

    # Fall back to the newest results file in the output directory.
    source_path = opts.results_file or _find_latest_results_file(opts.out_dir)
    if not source_path:
        print("未找到结果文件。请通过 --results-file 指定，或确保 data/validation_results 下存在 validation_results_*.jsonl。")
        raise SystemExit(1)

    analyzer = ResultAnalyzer(opts.out_dir)
    loaded_results = analyzer.load_results(source_path)
    summary = analyzer.analyze_evaluation_results(loaded_results)
    analyzer.generate_analysis_report(summary)
    analyzer.save_comparison_data(analyzer.create_comparison_data(loaded_results))
    if opts.gen_score_diff:
        analyzer.generate_score_diff_report(loaded_results, score_threshold=opts.score_threshold)
    analyzer.print_summary(summary)
    print(f"分析完成：{opts.out_dir}")
