#!/usr/bin/env python3
# SPDX-License-Identifier: MulanPSL-2.0+
# Batch query and analyze jobs submitted by linux_auto_test.py

import argparse
import csv
import json
import os
import re
import sys
import time
from collections import defaultdict
from datetime import datetime, timedelta

# 添加 bisect 模块路径
sys.path.append(os.path.join(os.environ['LKP_SRC'], 'programs/bisect-py'))

# 导入 ManticoreClient
from manticore_simple import ManticoreClient

# Mapping tables from human-readable status names to the numeric IDs used by
# the scheduler.  They exist for display purposes: the format_* helpers below
# render values as "name(id)".

# Data-readiness and stage IDs are sequential, so build them positionally.
JOB_DATA_READINESS_NAME2ID = {
    name: idx for idx, name in enumerate(
        ["N/A", "uploading", "uploaded", "complete", "incomplete", "norun"]
    )
}

JOB_STAGE_NAME2ID = {
    name: idx for idx, name in enumerate([
        "submit", "dispatch", "boot", "setup", "wait_peer", "running",
        "post_run", "manual_check", "renew", "finish", "cancel",
        "abort_invalid", "abort_wait", "abort_provider",
    ])
}

# Health IDs are not sequential: they sit in numeric bands
# (0-2 generic, 10 cancel, 20-25 boot/load errors, 30-33 aborts,
# 40-45 runtime failures, 50-57 per-stage timeouts).
JOB_HEALTH_NAME2ID = {
    "unknown": 0,
    "success": 1,
    "fail": 2,
    "cancel": 10,
    "wget_kernel_fail": 20,
    "wget_initrd_fail": 21,
    "initrd_broken": 22,
    "load_disk_fail": 23,
    "error_mount": 24,
    "microcode_mismatch": 25,
    "abort_invalid": 30,
    "abort_wait": 31,
    "abort_provider": 32,
    "abort": 33,
    "soft_timeout": 40,
    "nfs_hang": 41,
    "oom": 42,
    "kernel_panic": 43,
    "terminate": 44,
    "disturbed": 45,
    "timeout_dispatch": 50,
    "timeout_boot": 51,
    "timeout_setup": 52,
    "timeout_wait_peer": 53,
    "timeout_running": 54,
    "timeout_post_run": 55,
    "timeout_manual_check": 56,
    "timeout_renew": 57,
}


def _format_with_id(value, name2id):
    """Shared helper: 'name(id)' when value is a known name, else str(value)."""
    ident = name2id.get(value)
    return str(value) if ident is None else f"{value}({ident})"


def format_stage(stage):
    """Render a job stage for display, e.g. 'finish(9)'."""
    return _format_with_id(stage, JOB_STAGE_NAME2ID)


def format_health(health):
    """Render a job health value for display, e.g. 'success(1)'."""
    return _format_with_id(health, JOB_HEALTH_NAME2ID)


def format_data_readiness(readiness):
    """Render a data-readiness value for display, e.g. 'complete(3)'."""
    return _format_with_id(readiness, JOB_DATA_READINESS_NAME2ID)

# Logging configuration
# NOTE: configured at import time so the module works when run as a script;
# all records go to stdout as "timestamp [LEVEL] message".
import logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[logging.StreamHandler(sys.stdout)]
)
# Module-wide logger used by BatchJobQuery and the helpers above.
logger = logging.getLogger('batch_query')

class BatchJobQuery:
    """Batch job results query and analysis tool"""
    
    def __init__(self, db_config=None):
        """Create a Manticore HTTP client from db_config or the environment.

        :param db_config: optional dict with 'host'/'port' overrides; falls
            back to the MANTICORE_HOST / MANTICORE_PORT environment
            variables, then to localhost:9306.
        """
        # Database configuration (using environment variables or defaults)
        db_config = db_config or {}
        host = db_config.get('host', os.getenv('MANTICORE_HOST', 'localhost'))
        port = int(db_config.get('port', os.getenv('MANTICORE_PORT', '9306')))
        # The HTTP API usually listens on 9308, while 9306 is the SQL-protocol
        # default, so remap it.  NOTE(review): this also overrides an explicit
        # db_config port of 9306 — confirm that is intended.
        if port == 9306:
            port = 9308
        
        self.client = ManticoreClient(host=host, port=port)
        # Manticore index/table name to query, default 'jobs'.
        self.database = os.getenv('MANTICORE_DB', 'jobs')
        logger.info(f"Manticore HTTP client initialized: {host}:{port}")

    def query_jobs(self, hours=24, suite=None, testbox=None, health=None, stage=None,
                  commit=None, metric=None, limit=1000, show_errid=False, kernel_ci=None):
        """Query batch-submitted jobs through the Manticore HTTP API.

        BUG FIX: the body previously referenced an undefined name
        ``kernel_ci`` (it was missing from the signature), raising NameError
        on every call.  It is now an optional keyword parameter.

        :param hours: look back this many hours from now (by submit_time)
        :param suite: filter on the top-level 'suite' field
        :param testbox: filter on the top-level 'testbox' field
        :param health: filter on j.job_health
        :param stage: filter on j.job_stage
        :param commit: filter on the commit hash nested inside the 'j' field
        :param metric: not implemented in SQL mode; logged as a warning
        :param limit: maximum number of rows returned
        :param show_errid: when True, include 'errid' in each result dict
        :param kernel_ci: filter on the nested j.`kernel-ci` JSON field
        :return: list of normalized job dicts; empty list on failure
        """
        try:
            # Lower bound of the time window, as a unix timestamp.
            cutoff_time = datetime.now() - timedelta(hours=hours)
            unix_cutoff = int(cutoff_time.timestamp())

            # Build SQL WHERE clauses.
            # NOTE(review): filter values are interpolated directly into the
            # SQL string; callers must not pass untrusted input here.
            where_clauses = [f"submit_time >= {unix_cutoff}"]

            # kernel-ci is not part of full_text_kv, so query the 'j' JSON
            # field directly (Manticore supports json_field.nested = 'value').
            if kernel_ci:
                where_clauses.append(f"j.`kernel-ci` = '{kernel_ci}'")
                logger.debug(f"Added kernel-ci filter: {kernel_ci}")

            # suite exists both in full_text_kv and as a top-level column.
            if suite:
                where_clauses.append(f"suite = '{suite}'")

            if testbox:
                where_clauses.append(f"testbox = '{testbox}'")

            if health:
                # job_health lives inside the 'j' JSON field
                where_clauses.append(f"j.job_health = '{health}'")

            if stage:
                # job_stage lives inside the 'j' JSON field
                where_clauses.append(f"j.job_stage = '{stage}'")

            if commit:
                # the commit hash is nested inside 'j'; its path depends on suite
                if suite == 'makepkg':
                    where_clauses.append(f"j.program.makepkg.commit = '{commit}'")
                else:
                    where_clauses.append(f"j.ss.linux.commit = '{commit}'")

            # Filtering on a key inside the stats field is awkward in plain
            # SQL; keep the existing behavior of warning and ignoring it.
            if metric:
                logger.warning("Metric filtering not fully implemented in SQL mode")

            # Assemble the final SQL statement.
            where_clause = " AND ".join(where_clauses)
            sql_query = f"""
            SELECT * FROM {self.database}
            WHERE {where_clause}
            ORDER BY submit_time DESC
            LIMIT {limit}
            """

            logger.debug(f"Executing Manticore SQL query: {sql_query}")

            results = self.client.sql_select(sql_query)

            if results is None:
                logger.error("Query failed, client returned None")
                return []

            # Normalize each hit into a flat dict for downstream analysis.
            processed_jobs = []
            for hit in results:
                job_data = hit.get('j', {})
                # 'j' may arrive as a JSON string depending on the client
                if isinstance(job_data, str):
                    job_data = json.loads(job_data)

                job = {
                    'job_id': str(hit.get('id')),
                    'submit_time': datetime.fromtimestamp(hit.get('submit_time')).strftime('%Y-%m-%d %H:%M:%S'),
                    'result_root': job_data.get('result_root', ''),
                    'j': job_data, # Keep original 'j' for backward compatibility
                    'suite': job_data.get('suite'),
                    'testbox': job_data.get('testbox'),
                    'job_health': job_data.get('job_health'),
                    'job_stage': job_data.get('job_stage'),
                    'full_text_kv': hit.get('full_text_kv', ''),
                    'commit': self._extract_commit(job_data),
                    'host_machine': job_data.get('host_machine'),
                    'job_data_readiness': job_data.get('job_data_readiness', 'N/A'),
                    'git_repo': self._extract_github_repo(hit.get('full_text_kv', '')),
                    'unixbench_test': self._extract_unixbench_test(hit.get('full_text_kv', ''))
                }
                
                if show_errid:
                    job['errid'] = job_data.get('errid')
                
                processed_jobs.append(job)

            return processed_jobs
            
        except Exception as err:
            logger.error(f"Query failed: {err}", exc_info=True)
            return []
    
    def _extract_commit(self, job_data):
        """从任务数据中提取提交哈希"""
        try:
            if job_data.get('ss', {}).get('linux', {}).get('commit'):
                return job_data['ss']['linux']['commit']
            if job_data.get('program', {}).get('makepkg', {}).get('commit'):
                return job_data['program']['makepkg']['commit']
            for prog in job_data.get('program', {}).values():
                if isinstance(prog, dict) and 'commit' in prog:
                    return prog['commit']
            return None
        except Exception:
            return None
    
    def _extract_github_repo(self, full_text_kv):
        """从 full_text_kv 中提取 Git 仓库名"""
        if not full_text_kv:
            return None
        
        try:
            import re
            # 查找 ss.linux._url= 或 pp.makepkg._url= 模式
            url_match = re.search(r'(?:ss.linux._url|pp.makepkg._url)=([^\s]+)', full_text_kv)
            if url_match:
                url = url_match.group(1)
                # 从 URL 中提取仓库名
                # 支持格式: git+https://mirrors.tuna.tsinghua.edu.cn/git/linux-stable.git
                # 或者: https://github.com/user/repo.git
                repo_match = re.search(r'/([^/]+?)(?:.git)?/?$', url)
                if repo_match:
                    return repo_match.group(1)
            return None
        except:
            return None
    
    def _extract_unixbench_test(self, full_text_kv):
        """从 full_text_kv 中提取 unixbench 测试项"""
        if not full_text_kv:
            return None
        
        try:
            import re
            # 查找 pp.unixbench.test= 模式
            test_match = re.search(r'pp.unixbench.test=([^\s]+)', full_text_kv)
            if test_match:
                return test_match.group(1)
            return None
        except:
            return None
    
    def query_jobs_by_commits(self, head_commit, base_commit, hours=168, suite=None, testbox=None, limit=1000):
        """
        查询两个不同 commit 的任务数据用于对比
        
        :param head_commit: head commit 标识
        :param base_commit: base commit 标识  
        :param hours: 查询最近多少小时内的任务
        :param suite: 筛选特定的测试套件
        :param testbox: 筛选特定的测试机类型
        :param limit: 返回结果数量限制
        :return: (head_jobs, base_jobs) 元组
        """
        head_jobs = self.query_jobs(hours=hours, suite=suite, testbox=testbox, 
                                  commit=head_commit, limit=limit)
        base_jobs = self.query_jobs(hours=hours, suite=suite, testbox=testbox,
                                  commit=base_commit, limit=limit)
        
        return head_jobs, base_jobs
    
    def _group_jobs_for_comparison(self, jobs):
        """按 git_repo + unixbench_test + testbox 分组任务"""
        from collections import defaultdict
        groups = defaultdict(list)
        
        for job in jobs:
            if job.get('job_health') != 'success' or job.get('job_stage') != 'finish':
                continue  # 只处理成功完成的任务
                
            git_repo = job.get('git_repo')
            unixbench_test = job.get('unixbench_test') 
            testbox = job.get('testbox')
            
            # 跳过没有 git_repo 信息的任务，确保对比在相同仓库下进行
            if not git_repo:
                logger.debug(f"Skipping job {job['job_id']} - no git_repo information")
                continue
            
            # 对于 unixbench 套件，需要有具体的测试项
            if job.get('suite') == 'unixbench' and unixbench_test:
                group_key = (git_repo, unixbench_test, testbox)
                groups[group_key].append(job)
            # 对于其他套件，直接使用 suite 作为测试项
            elif job.get('suite') != 'unixbench':
                group_key = (git_repo, job.get('suite'), testbox)
                groups[group_key].append(job)
                
        return groups
    
    def _call_compare_command(self, head_job_ids, base_job_ids):
        """Run the external 'compare' tool on two sets of job ids.

        :param head_job_ids: list of head-side job id strings
        :param base_job_ids: list of base-side job id strings
        :return: the tool's stdout on success, None on any failure
        """
        try:
            import subprocess
            import os
            
            # Locate the compare tool under CCI_SRC (default checkout path).
            compare_tool = f"{os.getenv('CCI_SRC', '/c/lkp-tests')}/sbin/compare"
            cmd = [
                compare_tool,
                f"id={','.join(head_job_ids)}",
                f"id={','.join(base_job_ids)}",
            ]
            
            logger.debug(f"Executing compare command: {' '.join(cmd)}")
            
            # 5-minute hard cap on the external process
            proc = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
            
            if proc.returncode != 0:
                logger.error(f"Compare command failed: {proc.stderr}")
                return None
            return proc.stdout
                
        except subprocess.TimeoutExpired:
            logger.error("Compare command timed out")
            return None
        except Exception as e:
            logger.error(f"Error calling compare command: {str(e)}")
            return None
    
    def _parse_compare_output(self, compare_output):
        """Parse the compare command's table output into metric dicts.

        Only 'unixbench.*' rows are extracted.  Four column layouts are
        recognized, depending on whether the base and/or head value carries
        a '± N%' standard-deviation suffix.  Each matched row produces a
        dict with keys: metric, base_value, base_stddev, head_value,
        head_stddev, change_value, change_percent, is_significant.

        :param compare_output: raw stdout text from the compare command
        :return: list of metric dicts (empty for empty/unmatched output)
        """
        if not compare_output:
            return []
        
        results = []
        lines = compare_output.strip().split('\n')
        
        logger.debug(f"Parsing compare output with {len(lines)} lines")
        
        # Skip the header; data rows start after a '----' separator line.
        data_started = False
        for i, line in enumerate(lines):
            line = line.strip()
            if not line:
                continue
                
            # Header/data separator line
            if '----' in line and not data_started:
                data_started = True
                logger.debug(f"Data section started at line {i}")
                continue
                
            if not data_started:
                continue
            
            # Only rows mentioning unixbench metrics are of interest
            if 'unixbench.' in line:
                logger.debug(f"Found unixbench line: {line}")
                
                # Try several regexes, since compare's column layout varies
                # with whether stddev values are present on each side.
                import re
                
                # Format 1: base_value ± stddev%   change%   head_value ± stddev%   unixbench.metric
                # e.g.: 820.03 ± 17%       -0.0%        819.67 ± 17%    unixbench.System_Call_Overhead
                pattern1 = r'^\s*(\d+\.?\d*)\s*±\s*(\d+)%\s*([+-]?\d+\.?\d*%?)\s+(\d+\.?\d*)\s*±\s*(\d+)%\s+(unixbench\.\S+)'
                match1 = re.search(pattern1, line)
                
                # Format 2: base_value   change%   head_value ± stddev%   unixbench.metric
                # e.g.: 733.90            +45.1%       1065.13 ± 28%    unixbench.File_Copy_256_bufsize_500_maxblocks
                pattern2 = r'^\s*(\d+\.?\d*)\s*([+-]?\d+\.?\d*%?)\s+(\d+\.?\d*)\s*±\s*(\d+)%\s+(unixbench\.\S+)'
                match2 = re.search(pattern2, line)
                
                # Format 3: base_value ± stddev%   change%   head_value   unixbench.metric
                # e.g.: 820.03 ± 17%       -0.0%        819.67    unixbench.System_Call_Overhead
                pattern3 = r'^\s*(\d+\.?\d*)\s*±\s*(\d+)%\s*([+-]?\d+\.?\d*%?)\s+(\d+\.?\d*)\s+(unixbench\.\S+)'
                match3 = re.search(pattern3, line)
                
                # Format 4: base_value   change%   head_value   unixbench.metric (no stddev)
                # e.g.: 733.90            +45.1%       1065.13    unixbench.File_Copy_256_bufsize_500_maxblocks
                pattern4 = r'^\s*(\d+\.?\d*)\s*([+-]?\d+\.?\d*%?)\s+(\d+\.?\d*)\s+(unixbench\.\S+)'
                match4 = re.search(pattern4, line)
                
                if match1:
                    # Format 1: stddev on both sides
                    try:
                        base_value = float(match1.group(1))
                        base_stddev = float(match1.group(2))
                        change_str = match1.group(3)
                        head_value = float(match1.group(4))
                        head_stddev = float(match1.group(5))
                        metric_name = match1.group(6).strip()
                        
                        logger.debug(f"Format1 - Parsed: base={base_value}±{base_stddev}%, head={head_value}±{head_stddev}%, change={change_str}, metric={metric_name}")
                    except (ValueError, IndexError) as e:
                        logger.warning(f"Failed to parse format1 line: {line} - {e}")
                        continue
                        
                elif match2:
                    # Format 2: stddev on head only
                    try:
                        base_value = float(match2.group(1))
                        base_stddev = None  # base has no stddev
                        change_str = match2.group(2)
                        head_value = float(match2.group(3))
                        head_stddev = float(match2.group(4))
                        metric_name = match2.group(5).strip()
                        
                        logger.debug(f"Format2 - Parsed: base={base_value} (no stddev), head={head_value}±{head_stddev}%, change={change_str}, metric={metric_name}")
                    except (ValueError, IndexError) as e:
                        logger.warning(f"Failed to parse format2 line: {line} - {e}")
                        continue
                        
                elif match3:
                    # Format 3: stddev on base only
                    try:
                        base_value = float(match3.group(1))
                        base_stddev = float(match3.group(2))
                        change_str = match3.group(3)
                        head_value = float(match3.group(4))
                        head_stddev = None  # head has no stddev
                        metric_name = match3.group(5).strip()
                        
                        logger.debug(f"Format3 - Parsed: base={base_value}±{base_stddev}%, head={head_value} (no stddev), change={change_str}, metric={metric_name}")
                    except (ValueError, IndexError) as e:
                        logger.warning(f"Failed to parse format3 line: {line} - {e}")
                        continue
                        
                elif match4:
                    # Format 4: no stddev on either side
                    try:
                        base_value = float(match4.group(1))
                        base_stddev = None
                        change_str = match4.group(2)
                        head_value = float(match4.group(3))
                        head_stddev = None
                        metric_name = match4.group(4).strip()
                        
                        logger.debug(f"Format4 - Parsed: base={base_value}, head={head_value}, change={change_str}, metric={metric_name} (no stddev)")
                    except (ValueError, IndexError) as e:
                        logger.warning(f"Failed to parse format4 line: {line} - {e}")
                        continue
                else:
                    logger.warning(f"No regex pattern matched unixbench line: {line}")
                    continue
                
                # Parse the change percentage (common to all formats)
                if change_str.endswith('%'):
                    change_percent = change_str
                    change_value = head_value - base_value
                else:
                    change_percent = "0.0%"
                    change_value = 0.0
                
                # Significance: the absolute change must exceed the stddev
                # envelope of whichever side(s) report one.
                is_significant = True  # default: significant
                if base_stddev is not None and head_stddev is not None:
                    # both sides have stddev: use the larger one as yardstick
                    max_stddev_percent = max(base_stddev, head_stddev)
                    stddev_threshold = max(base_value, head_value) * max_stddev_percent / 100
                    is_significant = abs(change_value) > stddev_threshold
                elif base_stddev is not None:
                    # only base has stddev
                    stddev_threshold = base_value * base_stddev / 100
                    is_significant = abs(change_value) > stddev_threshold
                elif head_stddev is not None:
                    # only head has stddev
                    stddev_threshold = head_value * head_stddev / 100
                    is_significant = abs(change_value) > stddev_threshold
                # neither side has stddev: stay significant by default
                
                results.append({
                    'metric': metric_name,
                    'base_value': base_value,
                    'base_stddev': base_stddev,
                    'head_value': head_value, 
                    'head_stddev': head_stddev,
                    'change_value': change_value,
                    'change_percent': change_percent,
                    'is_significant': is_significant
                })
        
        logger.debug(f"Parsed {len(results)} results total")
        return results
    
    def perform_commit_comparison(self, head_commit, base_commit, hours=168, suite=None, testbox=None):
        """Run a head-vs-base commit comparison end to end.

        Fetches jobs for both commits, groups them by
        (git_repo, test item, testbox), runs the external compare tool on
        every group present on both sides (up to 3 jobs per side), and
        flattens the parsed metrics into result dicts suitable for
        format_comparison_table / export_comparison_to_csv.

        :param head_commit: head commit identifier
        :param base_commit: base commit identifier
        :param hours: look-back window passed to the job query
        :param suite: optional suite filter
        :param testbox: optional testbox filter
        :return: list of comparison result dicts (empty when either side
            has no jobs)
        """
        logger.info(f"Comparing {head_commit} vs {base_commit}")
        
        # Fetch the job lists for both commits
        head_jobs, base_jobs = self.query_jobs_by_commits(
            head_commit, base_commit, hours, suite, testbox
        )
        
        if not head_jobs:
            logger.warning(f"No jobs found for head commit: {head_commit}")
            return []
        
        if not base_jobs:
            logger.warning(f"No jobs found for base commit: {base_commit}")
            return []
        
        # Group each side by (git_repo, test item, testbox)
        head_groups = self._group_jobs_for_comparison(head_jobs)
        base_groups = self._group_jobs_for_comparison(base_jobs)
        
        comparison_results = []
        
        # Only groups present on both sides (same git_repo + test + testbox)
        # can be compared
        common_groups = set(head_groups.keys()) & set(base_groups.keys())
        
        for group_key in common_groups:
            git_repo, test_item, testbox = group_key
            
            # git_repo must be set so the comparison stays within one repo
            if not git_repo:
                logger.warning(f"Skipping comparison for group with empty git_repo: {group_key}")
                continue
            
            # Take at most 3 job ids from each side
            head_job_ids = [job['job_id'] for job in head_groups[group_key][:3]]
            base_job_ids = [job['job_id'] for job in base_groups[group_key][:3]]
            
            # Need at least one job on each side to compare
            if not head_job_ids or not base_job_ids:
                continue
                
            logger.info(f"Comparing {git_repo}/{test_item} on {testbox}: "
                       f"{len(head_job_ids)} head jobs vs {len(base_job_ids)} base jobs")
            
            # Run the external compare tool
            compare_output = self._call_compare_command(head_job_ids, base_job_ids)
            
            if compare_output:
                logger.debug(f"Compare output for {group_key}:\n{compare_output}")
                
                # Parse the table it printed
                metrics = self._parse_compare_output(compare_output)
                
                logger.debug(f"Parsed {len(metrics)} metrics for {group_key}: {[m['metric'] for m in metrics]}")
                
                for metric in metrics:
                    comparison_results.append({
                        'git_repo': git_repo or 'N/A',
                        'test_item': test_item or 'N/A', 
                        'testbox': testbox or 'N/A',
                        'head_commit': head_commit,
                        'base_commit': base_commit,
                        'metric': metric['metric'],
                        'head_value': metric['head_value'],
                        'head_stddev': metric.get('head_stddev'),        # head-side stddev (may be None)
                        'base_value': metric['base_value'],
                        'base_stddev': metric.get('base_stddev'),        # base-side stddev (may be None)
                        'change_value': metric['change_value'],
                        'change_percent': metric['change_percent'],
                        'is_significant': metric.get('is_significant', True),  # significance flag
                        'head_job_count': len(head_job_ids),
                        'base_job_count': len(base_job_ids),
                        'head_job_ids': head_job_ids,                    # job ids used on the head side
                        'base_job_ids': base_job_ids                     # job ids used on the base side
                    })
            else:
                logger.warning(f"Failed to get compare results for {group_key}")
        
        logger.info(f"Total comparison results generated: {len(comparison_results)}")
        return comparison_results
    
    def format_comparison_table(self, comparison_results):
        """Render comparison results as an ASCII table with stddev columns.

        The output has three sections: the job IDs that went into each
        comparison, the metrics table itself, and a significance summary.

        :param comparison_results: dicts from perform_commit_comparison
        :return: multi-line string ready for printing
        """
        if not comparison_results:
            return "No comparison results to display"
        
        out = []
        banner = "=" * 100
        out.append(banner)
        out.append("Performance Comparison Results with Standard Deviation")
        out.append(banner)
        out.append("")
        
        # One entry per (git_repo, testbox); the first result seen for a
        # pair supplies the job ids and commits shown.
        id_groups = {}
        for item in comparison_results:
            pair = (item['git_repo'], item['testbox'])
            if pair not in id_groups:
                id_groups[pair] = {
                    'head_job_ids': item['head_job_ids'],
                    'base_job_ids': item['base_job_ids'],
                    'head_commit': item['head_commit'],
                    'base_commit': item['base_commit'],
                }
        
        out.append("Job IDs used in comparison:")
        for (repo, box), info in id_groups.items():
            out.append(f"  {repo} on {box}:")
            out.append(f"    Head ({info['head_commit'][:10]}): {','.join(info['head_job_ids'])}")
            out.append(f"    Base ({info['base_commit'][:10]}): {','.join(info['base_job_ids'])}")
            out.append("")
        
        headers = [
            "Git Repo", "Test Item", "Testbox", "Metric", 
            "Head Commit", "Base Commit",
            "Head Value", "Head StdDev", "Base Value", "Base StdDev",
            "Change", "Change %", "Significant", "Jobs (H/B)"
        ]
        
        def as_row(item):
            # Build one table row, truncating wide fields for readability.
            base_sd = "N/A" if item['base_stddev'] is None else f"±{item['base_stddev']:.1f}%"
            head_sd = "N/A" if item['head_stddev'] is None else f"±{item['head_stddev']:.1f}%"
            # insignificant changes carry a warning marker
            flag = "Yes" if item['is_significant'] else "No ⚠️"
            return [
                item['git_repo'][:15],
                item['test_item'][:20],
                item['testbox'][:12],
                item['metric'].replace('unixbench.', '')[:25],
                item['head_commit'][:10],
                item['base_commit'][:10],
                f"{item['head_value']:.2f}",
                head_sd,
                f"{item['base_value']:.2f}",
                base_sd,
                f"{item['change_value']:+.2f}",
                item['change_percent'],
                flag,
                f"{item['head_job_count']}/{item['base_job_count']}",
            ]
        
        rows = [as_row(item) for item in comparison_results]
        
        # Column widths: max of header width and widest cell per column.
        widths = [
            max(len(header), *(len(str(row[i])) for row in rows))
            for i, header in enumerate(headers)
        ]
        
        separator = "+" + "+".join("-" * (w + 2) for w in widths) + "+"
        
        def render(cells):
            return "|" + "|".join(f" {cell:<{widths[i]}} " for i, cell in enumerate(cells)) + "|"
        
        out.extend([
            "Metrics Comparison:",
            separator,
            render(headers),
            separator,
        ])
        out.extend(render(row) for row in rows)
        out.append(separator)
        
        # Summary: how many changes exceed their stddev envelope.
        total = len(comparison_results)
        significant = sum(1 for item in comparison_results if item['is_significant'])
        insignificant = total - significant
        
        out.append("")
        out.append("Summary:")
        out.append(f"  Total metrics compared: {total}")
        out.append(f"  Significant changes: {significant} ({significant/total*100:.1f}%)")
        out.append(f"  Insignificant changes: {insignificant} ({insignificant/total*100:.1f}%) ⚠️")
        out.append("")
        out.append("Note: 'Significant' means the change is larger than the standard deviation range.")
        out.append("      Changes smaller than StdDev may be due to measurement noise.")
        
        return "\n".join(out)
    
    def export_comparison_to_csv(self, comparison_results, filename):
        """Write performance comparison results to a CSV file.

        :param comparison_results: dicts from perform_commit_comparison
        :param filename: destination CSV path
        :return: True on success; False when there is nothing to export or
            the write fails

        BUG FIX: the success log previously printed the literal text
        '(unknown)' instead of the destination filename.
        """
        if not comparison_results:
            logger.warning("No comparison results to export")
            return False
        
        fieldnames = [
            'git_repo', 'test_item', 'testbox', 'metric',
            'head_commit', 'base_commit', 
            'head_value', 'head_stddev', 'head_job_ids',
            'base_value', 'base_stddev', 'base_job_ids',
            'change_value', 'change_percent', 'is_significant',
            'head_job_count', 'base_job_count'
        ]
        
        try:
            with open(filename, 'w', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()
                
                for result in comparison_results:
                    row = {name: result[name] for name in fieldnames}
                    # stddevs may be None (no '±' column in compare output);
                    # emit an empty cell rather than the string 'None'
                    if result['head_stddev'] is None:
                        row['head_stddev'] = ''
                    if result['base_stddev'] is None:
                        row['base_stddev'] = ''
                    # job id lists become comma-separated strings
                    row['head_job_ids'] = ','.join(result['head_job_ids'])
                    row['base_job_ids'] = ','.join(result['base_job_ids'])
                    writer.writerow(row)
                    
            logger.info(f"Performance comparison results exported to {filename}")
            return True
            
        except Exception as e:
            logger.error(f"Failed to export comparison results: {str(e)}")
            return False
    
    def analyze_results(self, jobs, show_errid=False):
        """Aggregate summary statistics over a list of job dicts.

        Args:
            jobs: list of job dicts as returned by query_jobs(); each must
                carry 'job_id', 'suite', 'testbox', 'commit', 'submit_time'
                and 'job_health' keys; other fields are read via .get().
            show_errid: when True, additionally group jobs by their 'errid'
                field (kept optional because the grouping can be large).

        Returns:
            A dict of distributions/counters (some converted to plain dicts
            at the end), or None when jobs is empty.
        """
        if not jobs:
            return None

        # Single pass over submit times instead of two keyed min/max scans.
        submit_times = [job['submit_time'] for job in jobs]

        analysis = {
            'total': len(jobs),
            'job_health_distribution': defaultdict(int),
            'suite_distribution': defaultdict(int),
            'testbox_distribution': defaultdict(int),
            'commit_distribution': defaultdict(int),
            'metrics': set(),
            'job_stage_distribution': defaultdict(int),
            'job_stage_when_health_none': defaultdict(int),
            'finish_fail_count': 0,
            'finish_fail_ids': [],
            'finish_abort_count': 0,
            'finish_abort_ids': [],
            'data_readiness_distribution': defaultdict(int),
            'earliest_submit': min(submit_times),
            'latest_submit': max(submit_times),
            'jobs': jobs  # keep the raw jobs for later detail listings
        }

        if show_errid:
            analysis['errid_distribution'] = defaultdict(list)

        # Collect all per-job statistics in one pass.
        for job in jobs:
            analysis['job_health_distribution'][job['job_health']] += 1
            analysis['suite_distribution'][job['suite']] += 1
            analysis['testbox_distribution'][job['testbox']] += 1

            if job['commit']:
                # Abbreviate to the 7-char short hash for the distribution key.
                analysis['commit_distribution'][job['commit'][:7]] += 1

            analysis['job_stage_distribution'][job.get('job_stage')] += 1
            if job.get('job_health') is None:
                analysis['job_stage_when_health_none'][job.get('job_stage')] += 1

            if job.get('job_stage') == 'finish' and job.get('job_health') == 'fail':
                analysis['finish_fail_count'] += 1
                if len(analysis['finish_fail_ids']) < 10:  # sample at most 10 ids
                    analysis['finish_fail_ids'].append(job['job_id'])

            if job.get('job_stage') == 'finish' and job.get('job_health') == 'abort':
                analysis['finish_abort_count'] += 1
                if len(analysis['finish_abort_ids']) < 10:  # sample at most 10 ids
                    analysis['finish_abort_ids'].append(job['job_id'])

            if show_errid:
                errid = job.get('errid')
                if errid is not None and errid != '':
                    # Keep job_id/git_repo/commit so errors can later be
                    # grouped and displayed per repo@commit.
                    job_info = {
                        'job_id': job['job_id'],
                        'git_repo': job.get('git_repo', 'unknown'),
                        'commit': job.get('commit', 'unknown')[:12] if job.get('commit') else 'unknown'
                    }
                    analysis['errid_distribution'][str(errid)].append(job_info)

            # stats may arrive as a JSON string; decode it to collect metric names.
            stats = job.get('stats', {})
            if isinstance(stats, str):
                try:
                    stats = json.loads(stats)
                except (ValueError, TypeError):  # malformed JSON -> no metrics
                    stats = {}

            if isinstance(stats, dict):
                analysis['metrics'].update(stats.keys())

            analysis['data_readiness_distribution'][job.get('job_data_readiness', 'N/A')] += 1

        # Convert (a subset of) the defaultdicts to plain dicts for consumers.
        analysis['job_health_distribution'] = dict(analysis['job_health_distribution'])
        analysis['suite_distribution'] = dict(analysis['suite_distribution'])
        analysis['testbox_distribution'] = dict(analysis['testbox_distribution'])
        analysis['commit_distribution'] = dict(analysis['commit_distribution'])
        analysis['metrics'] = sorted(analysis['metrics'])

        if show_errid:
            analysis['errid_distribution'] = dict(analysis['errid_distribution'])

        return analysis
    
    def format_analysis(self, analysis):
        """Render the dict produced by analyze_results() as a plain-text report.

        Args:
            analysis: the analysis dict, or a falsy value.

        Returns:
            The multi-line report string (a fixed placeholder when analysis
            is empty).
        """
        import ast  # parse Python-literal errid lists (single-quoted form)

        if not analysis:
            return "无分析结果"

        output = [
            "=" * 80,
            "Batch Job Analysis Report",
            "=" * 80,
            f"Total Jobs: {analysis['total']}",
            f"(Note: Analysis includes only the latest {analysis['total']} records, limited by --limit parameter)",
            f"Time Range: {analysis['earliest_submit']} to {analysis['latest_submit']}",
            "",
            "Job Health Distribution:"
        ]

        for status, count in analysis['job_health_distribution'].items():
            output.append(f"  {format_health(status)}: {count} ({count/analysis['total']*100:.1f}%)")

        # Health statuses that get a detailed job-id listing.
        health_categories = {
            'abort_wait': 'Abort Wait',
            'abort_invalid': 'Abort Invalid',
            'None': 'None',
            'fail': 'Fail'
        }

        # Collect job_ids per status. Bug fix: jobs carry the real None
        # object for job_health, so it must be normalized to the string
        # key 'None' — previously that category could never collect ids.
        health_job_ids = {health: [] for health in health_categories}

        for job in analysis.get('jobs', []):
            job_health = job.get('job_health')
            health_key = 'None' if job_health is None else job_health
            job_id = job.get('job_id')
            if health_key in health_job_ids and job_id:
                health_job_ids[health_key].append(str(job_id))

        # Output detailed job_id lists for each status.
        output.append("")
        for health_key, health_name in health_categories.items():
            count = analysis['job_health_distribution'].get(health_key, 0)
            if health_key == 'None':
                # The distribution dict is keyed by the actual None object.
                count += analysis['job_health_distribution'].get(None, 0)
            if count > 0:
                output.append(f"\n{health_name}({health_key}) Job Details:")
                output.append(f"  Total: {count}")
                if health_job_ids[health_key]:
                    job_ids = health_job_ids[health_key]
                    output.append(f"  Job IDs:")
                    # Display job_ids ten per line.
                    for i in range(0, len(job_ids), 10):
                        batch = job_ids[i:i+10]
                        output.append(f"    {', '.join(batch)}")
                else:
                    output.append(f"  No related jobs")

        output.append(f"\nJobs with job_stage=finish and job_health=fail: {analysis['finish_fail_count']}")
        if analysis['finish_fail_ids']:
            output.append(f"  First {len(analysis['finish_fail_ids'])} job_id: {', '.join(analysis['finish_fail_ids'])}")
        else:
            output.append("  No related jobs")

        output.append(f"Jobs with job_stage=finish and job_health=abort: {analysis['finish_abort_count']}")
        if analysis['finish_abort_ids']:
            output.append(f"  First {len(analysis['finish_abort_ids'])} job_id: {', '.join(analysis['finish_abort_ids'])}")
        else:
            output.append("  No related jobs")

        output.append("\nJob Stage Distribution:")
        for stage, count in analysis['job_stage_distribution'].items():
            output.append(f"  {format_stage(stage)}: {count}")

        if analysis.get('job_stage_when_health_none'):
            output.append("\nJob Stage Distribution (when job_health=None):")
            for stage, count in analysis['job_stage_when_health_none'].items():
                output.append(f"  {format_stage(stage)}: {count}")

        output.append("\nSuite Distribution:")
        for suite, count in analysis['suite_distribution'].items():
            output.append(f"  {suite}: {count}")

        output.append("\nTestbox Distribution:")
        for testbox, count in analysis['testbox_distribution'].items():
            output.append(f"  {testbox}: {count}")

        if analysis.get('errid_distribution'):
            total_errid_jobs = sum(len(v) for v in analysis['errid_distribution'].values())
            output.append("\nJobs with errid:")
            output.append(f"Total: {total_errid_jobs} jobs")

            output.append("\nError ID Distribution (one error per line for easy grep):")
            for errid, job_infos in sorted(analysis['errid_distribution'].items()):
                # errid may be a JSON array (double quotes) or a Python list
                # literal (single quotes); try both before printing raw.
                error_items = None
                if errid.startswith('[') and errid.endswith(']'):
                    try:
                        error_items = json.loads(errid)
                    except (json.JSONDecodeError, ValueError):
                        try:
                            error_items = ast.literal_eval(errid)
                        except (ValueError, SyntaxError):
                            pass

                if error_items and isinstance(error_items, list):
                    # One error item per line for easy grepping.
                    output.append("ERRID: [")
                    for idx, error_item in enumerate(error_items):
                        suffix = ',' if idx < len(error_items) - 1 else ''
                        output.append(f"  '{error_item}'{suffix}")
                    output.append("]")
                else:
                    output.append(f"ERRID: {errid}")

                output.append("")  # blank line separates ERRID from JOBS
                repo_commit_groups = defaultdict(list)

                for job_info in job_infos:
                    # Legacy format compatibility: bare job_id string.
                    if isinstance(job_info, str):
                        job_info = {'job_id': job_info, 'git_repo': 'unknown', 'commit': 'unknown'}

                    repo_commit_key = f"{job_info['git_repo']}@{job_info['commit']}"
                    repo_commit_groups[repo_commit_key].append(job_info['job_id'])

                # One line per repo@commit, no indentation (compact format).
                for repo_commit, job_ids in sorted(repo_commit_groups.items()):
                    job_ids_str = ','.join(job_ids)
                    output.append(f"JOBS[{repo_commit}]: {job_ids_str}")

                output.append("")  # Empty line to separate different errid groups
        else:
            output.append("\nJobs with errid: 0")

        output.append("=" * 80)
        return "\n".join(output)
    
    def export_to_csv(self, jobs, filename):
        """Export job rows (plus metric count and error summary) to a CSV file.

        Args:
            jobs: list of job dicts from query_jobs().
            filename: destination CSV path.

        Returns:
            True on success, False when jobs is empty or writing failed.
        """
        if not jobs:
            logger.warning("No jobs to export")
            return False

        try:
            with open(filename, 'w', newline='', encoding='utf-8') as f:
                # CSV schema, including the error-summary column.
                fieldnames = ['job_id', 'suite', 'testbox', 'job_health', 'job_stage',
                            'submit_time', 'commit', 'host_machine', 'metrics_count', 'error_summary']

                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()

                for job in jobs:
                    # stats may arrive as a JSON string; decode to count metrics.
                    stats = job.get('stats', {})
                    if isinstance(stats, str):
                        try:
                            stats = json.loads(stats)
                        except (ValueError, TypeError):  # malformed JSON -> 0 metrics
                            stats = {}

                    metrics_count = len(stats) if isinstance(stats, dict) else 0

                    # 'errors' is a list of cleaned scheduler-log lines (when
                    # --scheduler-log was used); flatten it for the CSV cell.
                    error_summary = job.get('errors', '')
                    if isinstance(error_summary, list):
                        error_summary = "; ".join(error_summary)

                    writer.writerow({
                        'job_id': job['job_id'],
                        'suite': job['suite'],
                        'testbox': job['testbox'],
                        'job_health': job['job_health'],
                        'job_stage': job.get('job_stage', ''),
                        'submit_time': job['submit_time'],
                        'commit': job.get('commit', ''),
                        'host_machine': job.get('host_machine', ''),
                        'metrics_count': metrics_count,
                        'error_summary': error_summary
                    })

            # Bug fix: this message previously logged the literal "(unknown)"
            # instead of the destination path.
            logger.info(f"Results exported to: {filename}")
            return True
        except Exception as e:
            logger.error(f"Export failed: {str(e)}")
            return False
    
    def export_metrics(self, jobs, filename):
        """Export every job's metrics (stats) to a JSON file.

        Jobs without any stats are skipped entirely.

        Args:
            jobs: list of job dicts from query_jobs().
            filename: destination JSON path.

        Returns:
            True on success, False when jobs is empty or writing failed.
        """
        if not jobs:
            logger.warning("No jobs to export")
            return False

        try:
            metrics_data = []

            for job in jobs:
                # stats may arrive as a JSON string; decode before exporting.
                stats = job.get('stats', {})
                if isinstance(stats, str):
                    try:
                        stats = json.loads(stats)
                    except (ValueError, TypeError):  # malformed JSON -> skip job
                        stats = {}

                if not stats:
                    continue  # nothing to export for this job

                metrics_data.append({
                    'job_id': job['job_id'],
                    'suite': job['suite'],
                    'testbox': job['testbox'],
                    'job_health': job['job_health'],
                    'job_stage': job.get('job_stage', ''),
                    'submit_time': job['submit_time'],
                    'commit': job.get('commit', ''),
                    'host_machine': job.get('host_machine', ''),
                    'metrics': stats
                })

            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(metrics_data, f, indent=2, ensure_ascii=False)

            # Bug fix: this message previously logged the literal "(unknown)"
            # instead of the destination path.
            logger.info(f"Metrics data exported to: {filename}")
            return True
        except Exception as e:
            logger.error(f"Metrics export failed: {str(e)}")
            return False

    def search_scheduler_logs(self, job_id: str, log_path: str, context_lines: int = 5) -> str:
        """Grep scheduler logs for a job id and report nearby ERROR lines.

        Args:
            job_id: job identifier, matched as ``job_id=<value>`` followed by
                a non-alphanumeric character or end of line.
            log_path: a single log file, or a directory whose newest 3 *.log
                files are scanned (files over 100MB are skipped).
            context_lines: how many lines of lookahead window to use after a
                match; the default of 5 inspects/prints the 4 following lines
                (same span the original hard-coded).

        Returns:
            A human-readable report string; this method never raises.
        """
        try:
            import re
            job_id = str(job_id)
            # Exact-match pattern for "job_id=<value>": the trailing group
            # prevents matching a longer id that merely starts with job_id.
            job_id_pattern = re.compile(
                r'job_id\s*=\s*' + re.escape(job_id) +
                r'([^a-zA-Z0-9]|$)'
            )

            if not os.path.exists(log_path):
                return f"Log path does not exist: {log_path}"

            # Collect candidate log files.
            log_files = []
            if os.path.isfile(log_path):
                log_files = [log_path]
            elif os.path.isdir(log_path):
                # All *.log files, newest first by mtime.
                all_logs = [f for f in os.listdir(log_path)
                            if os.path.isfile(os.path.join(log_path, f))
                            and f.endswith('.log')]
                all_logs.sort(key=lambda f: os.path.getmtime(os.path.join(log_path, f)),
                              reverse=True)
                log_files = [os.path.join(log_path, f) for f in all_logs[:3]]  # newest 3 only

            if not log_files:
                return f"No log files found: {log_path}"

            results = []
            any_errors_found = False  # set when any file yields an ERROR

            # Bug fix: context_lines was accepted but ignored — the lookahead
            # was hard-coded to min(5, ...). Wiring the parameter in (with the
            # same default) preserves the previous 4-line window.
            lookahead = max(1, context_lines)

            for log_file in log_files:
                try:
                    file_size = os.path.getsize(log_file)
                    if file_size > 100 * 1024 * 1024:  # 100MB cap
                        results.append(f"Skipping large file: {os.path.basename(log_file)} ({file_size//1024//1024}MB)")
                        continue

                    with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
                        lines = f.readlines()
                        total_lines = len(lines)

                        for i, line in enumerate(lines):
                            if job_id_pattern.search(line):
                                # Matched line plus its lookahead context.
                                context_block = [f">>> {line.strip()}"]

                                has_error = False
                                for j in range(1, min(lookahead, total_lines - i)):
                                    next_line = lines[i + j].strip()
                                    context_block.append(f"    {next_line}")

                                    # Only ERROR counts; WARN is ignored.
                                    if "ERROR" in next_line:
                                        has_error = True
                                        any_errors_found = True

                                if has_error:
                                    results.append(f"File: {os.path.basename(log_file)}")
                                    results.append(f"Error found after job ID at line {i+1}:")
                                    results.extend(context_block)
                                    results.append('-' * 40)

                except Exception as e:
                    results.append(f"Error reading {log_file}: {str(e)}")

            # Only report "no ERROR" when no file produced one at all.
            if not any_errors_found and results:
                results.append(f"No ERROR found for job ID: {job_id}")

            if results:
                return '\n'.join(results)
            return f"No relevant logs found for job ID: {job_id}"

        except Exception as e:
            return f"Log search failed: {str(e)}"
        

def _build_parser():
    """Build the CLI argument parser; options are unchanged from the original."""
    parser = argparse.ArgumentParser(
        description='Batch query and analyze jobs submitted by linux_auto_test.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    # Query parameters
    parser.add_argument('--hours', type=int, default=24,
                        help='Query jobs from the last N hours')
    parser.add_argument('--suite',
                        help='Filter by specific test suite (e.g., unixbench, ltp)')
    parser.add_argument('--testbox',
                        help='Filter by specific testbox type (e.g., vm-2p8g)')
    parser.add_argument('--commit',
                        help='Filter by specific commit hash (prefix matching supported)')
    parser.add_argument('--metric',
                        help='Filter jobs containing a specific metric')
    parser.add_argument('--limit', type=int, default=1000,
                        help='Maximum number of results to return')

    # Output options
    parser.add_argument('--show-jobs', action='store_true',
                        help='Display job details')
    parser.add_argument('--show-stats', action='store_true',
                        help='Display job metrics data')
    parser.add_argument('--show-errid', action='store_true',
                        help='Display error ID information for jobs')
    parser.add_argument('--export-csv',
                        help='Export results to CSV file')
    parser.add_argument('--export-metrics',
                        help='Export all metrics data to JSON file')

    # Debug options
    parser.add_argument('--debug', action='store_true',
                        help='Enable debug mode')

    # Job filtering parameters
    parser.add_argument('--job-stage',
                        help='Only show jobs with specific job_stage (e.g., finish, submit, abort_provider)')
    parser.add_argument('--job-health',
                        help='Only show jobs with specific job_health (e.g., fail, success, abort_invalid, none)')

    # Scheduler log parameters
    parser.add_argument('--scheduler-log',
                        help='Path to scheduler logs (file or directory)')
    parser.add_argument('--log-context', type=int, default=5,
                        help='Number of context lines to show (default 5)')
    parser.add_argument('--show-logs', action='store_true',
                        help='Show scheduler logs (requires --scheduler-log parameter)')

    # Comparison parameters
    parser.add_argument('--compare-commits', action='store_true',
                        help='Compare performance between two commits')
    parser.add_argument('--head-commit',
                        help='Head commit for comparison (e.g., v6.16, abc123)')
    parser.add_argument('--base-commit',
                        help='Base commit for comparison (e.g., v6.15, def456)')

    return parser


def _run_comparison(query_tool, args):
    """Handle --compare-commits mode: compare, print, optionally export CSV."""
    if not args.head_commit or not args.base_commit:
        logger.error("--compare-commits requires both --head-commit and --base-commit")
        return

    logger.info("Performing commit comparison...")
    comparison_results = query_tool.perform_commit_comparison(
        args.head_commit, args.base_commit,
        hours=args.hours,
        suite=args.suite,
        testbox=args.testbox
    )

    if comparison_results:
        print("\n" + "=" * 100)
        print("COMMIT COMPARISON RESULTS")
        print("=" * 100)
        print(query_tool.format_comparison_table(comparison_results))
        print(f"\nTotal comparisons: {len(comparison_results)}")

        # Export comparison results when an output file was requested.
        if args.export_csv:
            success = query_tool.export_comparison_to_csv(comparison_results, args.export_csv)
            if success:
                print(f"\nComparison results exported to: {args.export_csv}")
            else:
                logger.error(f"Failed to export comparison results to: {args.export_csv}")
    else:
        logger.warning("No comparison results generated")

    query_tool.disconnect()


def _attach_error_summaries(query_tool, jobs, args):
    """Search scheduler logs per job and store cleaned ERROR lines in job['errors']."""
    import re
    # Matches timestamps like "2024-01-01 12:00:00+0800" so they can be stripped.
    timestamp_pattern = re.compile(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}[+\-]\d{4}')

    for job in jobs:
        log_result = query_tool.search_scheduler_logs(
            job_id=job['job_id'],
            log_path=args.scheduler_log,
            context_lines=args.log_context
        )
        error_lines = []
        for line in log_result.split('\n'):
            if "ERROR" not in line:
                continue
            # Strip timestamp, log level and context markers; keep the message.
            clean_message = timestamp_pattern.sub('', line)
            clean_message = clean_message.replace("ERROR", "").strip()
            clean_message = clean_message.replace(">>>", "").replace("    ", "").strip()
            if clean_message:
                error_lines.append(clean_message)
        job['errors'] = error_lines


def _job_matches_filters(job, args):
    """Return False when --job-stage / --job-health exclude this job.

    Filters match by raw value, by string form, or by "name(id)" prefix so
    users can filter with either the symbolic name or the numeric id.
    """
    if args.job_stage:
        stage = job.get('job_stage')
        if str(stage) != args.job_stage and stage != args.job_stage:
            if not (stage and format_stage(stage).startswith(args.job_stage)):
                return False
    if args.job_health:
        health = job.get('job_health')
        if args.job_health.lower() == 'none':
            # The literal word "none" selects jobs whose health is unset.
            if health is not None:
                return False
        elif str(health) != args.job_health and health != args.job_health:
            if not (health and format_health(health).startswith(args.job_health)):
                return False
    return True


def _print_metrics(job):
    """Print the job's metrics dict, decoding a JSON string if necessary."""
    stats = job.get('stats', {})
    if isinstance(stats, str):
        try:
            stats = json.loads(stats)
        except (ValueError, TypeError):  # malformed JSON -> nothing to show
            stats = {}

    if stats:
        print("  Metrics:")
        for metric, value in stats.items():
            print(f"    {metric}: {value}")


def _print_job(query_tool, job, args):
    """Print one job's detail section, optionally with metrics and logs."""
    print(f"job_id: {job['job_id']}")
    print(f"suite: {job['suite']}")
    print(f"testbox: {job['testbox']}")
    print(f"job_health: {format_health(job['job_health'])}")
    if 'job_stage' in job:
        print(f"job_stage: {format_stage(job['job_stage'])}")
    print(f"submit_time: {job['submit_time']}")
    if job.get('commit'):
        print(f"commit: {job['commit']}")
    if job.get('host_machine'):
        print(f"host_machine: {job['host_machine']}")
    if job.get('git_repo'):
        print(f"git_repo: {job['git_repo']}")
    if job.get('unixbench_test'):
        print(f"unixbench_test: {job['unixbench_test']}")

    # Display error summary (if any)
    if job.get('errors'):
        print("  Error Summary:")
        for error in job['errors']:
            print(f"    - {error}")

    # Display metrics data
    if args.show_stats:
        _print_metrics(job)

    # Display logs when requested, or automatically for jobs with no health.
    if args.scheduler_log and (args.show_logs or job.get('job_health') is None):
        log_result = query_tool.search_scheduler_logs(
            job_id=job['job_id'],
            log_path=args.scheduler_log,
            context_lines=args.log_context
        )
        print("  Scheduler Logs:")
        for line in log_result.split('\n'):
            print(f"    {line}")
        # Bug fix: the original unconditionally re-printed the metrics block
        # here, duplicating it whenever --show-stats was also set. Metrics
        # still accompany the logs, but only once.
        if not args.show_stats:
            _print_metrics(job)
    print("-" * 60)


def main():
    """Entry point: parse args, query jobs, analyze, print, and export."""
    args = _build_parser().parse_args()

    if args.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug("Debug mode enabled")

    query_tool = BatchJobQuery()

    # Comparison mode short-circuits the normal query/analysis flow.
    if args.compare_commits:
        _run_comparison(query_tool, args)
        return

    jobs = query_tool.query_jobs(
        hours=args.hours,
        suite=args.suite,
        testbox=args.testbox,
        health=args.job_health,
        stage=args.job_stage,
        commit=args.commit,
        metric=args.metric,
        limit=args.limit,
        show_errid=args.show_errid
    )

    # Extract per-job error summaries from scheduler logs when a path is given.
    if args.scheduler_log:
        _attach_error_summaries(query_tool, jobs, args)

    if not jobs:
        logger.info("No matching jobs found")
        return

    logger.info(f"Found {len(jobs)} matching jobs")

    analysis = query_tool.analyze_results(jobs, show_errid=args.show_errid)
    print(query_tool.format_analysis(analysis))

    if args.show_jobs:
        print("\nJob Details:")
        for job in jobs:
            if not _job_matches_filters(job, args):
                continue
            _print_job(query_tool, job, args)

    if args.export_csv:
        query_tool.export_to_csv(jobs, args.export_csv)

    if args.export_metrics:
        query_tool.export_metrics(jobs, args.export_metrics)

    # NOTE(review): disconnect was commented out on this path in the original
    # (only the comparison path disconnects) — presumably intentional; confirm.
    # query_tool.disconnect()


if __name__ == "__main__":
    main()
