#!/usr/bin/env python3
# SPDX-License-Identifier: MulanPSL-2.0+
# Batch query and analyze jobs submitted by linux_auto_test.py

import os
import sys
import time
import argparse
import mysql.connector
from datetime import datetime, timedelta
import csv
import json
from collections import defaultdict

# --- Status-name -> numeric-ID lookup tables --------------------------------
# Insertion order is preserved on purpose; callers may iterate these dicts.

# Readiness of a job's result data.
JOB_DATA_READINESS_NAME2ID = {
    "N/A": 0,
    "uploading": 1,
    "uploaded": 2,
    "complete": 3,
    "incomplete": 4,
    "norun": 5,
}

# Lifecycle stage of a job.
JOB_STAGE_NAME2ID = {
    "submit": 0,
    "dispatch": 1,
    "boot": 2,
    "setup": 3,
    "wait_peer": 4,
    "running": 5,
    "post_run": 6,
    "manual_check": 7,
    "renew": 8,
    "finish": 9,
    "cancel": 10,
    "abort_invalid": 11,
    "abort_wait": 12,
    "abort_provider": 13,
}

# Final health verdict of a job (IDs grouped by numeric range).
JOB_HEALTH_NAME2ID = {
    "unknown": 0,
    "success": 1,
    "fail": 2,
    "cancel": 10,
    # 20-25: provisioning / environment failures
    "wget_kernel_fail": 20,
    "wget_initrd_fail": 21,
    "initrd_broken": 22,
    "load_disk_fail": 23,
    "error_mount": 24,
    "microcode_mismatch": 25,
    # 30-33: aborts
    "abort_invalid": 30,
    "abort_wait": 31,
    "abort_provider": 32,
    "abort": 33,
    # 40-45: runtime failures
    "soft_timeout": 40,
    "nfs_hang": 41,
    "oom": 42,
    "kernel_panic": 43,
    "terminate": 44,
    "disturbed": 45,
    # 50-57: per-stage timeouts
    "timeout_dispatch": 50,
    "timeout_boot": 51,
    "timeout_setup": 52,
    "timeout_wait_peer": 53,
    "timeout_running": 54,
    "timeout_post_run": 55,
    "timeout_manual_check": 56,
    "timeout_renew": 57,
}

def format_stage(stage):
    """Render a job stage as 'name(id)' when known, otherwise str(stage)."""
    stage_id = JOB_STAGE_NAME2ID.get(stage)
    return str(stage) if stage_id is None else f"{stage}({stage_id})"

def format_health(health):
    """Render a job health value as 'name(id)' when known, otherwise str(health)."""
    health_id = JOB_HEALTH_NAME2ID.get(health)
    return str(health) if health_id is None else f"{health}({health_id})"

def format_data_readiness(readiness):
    """Render a data-readiness value as 'name(id)' when known, otherwise str(readiness)."""
    readiness_id = JOB_DATA_READINESS_NAME2ID.get(readiness)
    return str(readiness) if readiness_id is None else f"{readiness}({readiness_id})"

# Logging configuration: one-time, module-level setup applied at import time.
import logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[logging.StreamHandler(sys.stdout)]  # log to stdout (not the default stderr)
)
logger = logging.getLogger('batch_query')  # shared logger for this module

class BatchJobQuery:
    """Batch job results query and analysis tool"""
    
    def __init__(self, db_config=None):
        """Store connection settings; no connection is opened until connect().

        :param db_config: optional dict of mysql.connector.connect() kwargs.
                          Defaults to the MANTICORE_* environment variables.
        """
        # Database configuration (using environment variables or defaults).
        # NOTE(review): defaults (port 9306, db 'jobs') suggest a Manticore
        # Search instance speaking the MySQL protocol — confirm with ops.
        self.db_config = db_config or {
            'host': os.getenv('MANTICORE_HOST', 'localhost'),
            'port': int(os.getenv('MANTICORE_PORT', '9306')),
            'user': os.getenv('MANTICORE_USER', ''),
            'password': os.getenv('MANTICORE_PASSWORD', ''),
            'database': os.getenv('MANTICORE_DB', 'jobs')
        }
        self.connection = None  # live connection object, set by connect()
    
    def connect(self):
        """Open a database connection; return True on success, False on failure."""
        try:
            self.connection = mysql.connector.connect(**self.db_config)
        except mysql.connector.Error as err:
            logger.error(f"Database connection failed: {err}")
            return False
        logger.info(f"Database connection established: {self.db_config['host']}:{self.db_config['port']}")
        return True
    
    def disconnect(self):
        """Close database connection"""
        if self.connection and self.connection.is_connected():
            self.connection.close()
            logger.info("Database connection closed")
    
    def query_jobs(self, hours=24, suite=None, testbox=None, status=None, 
                  commit=None, metric=None, limit=1000):
        """
        Query batch-submitted jobs.

        :param hours: look back this many hours (by submit_time)
        :param suite: filter by test suite (e.g. unixbench, ltp)
        :param testbox: filter by testbox type
        :param status: filter by job_health value
        :param commit: filter by commit hash (matched against ss.linux.commit
                       or program.makepkg.commit)
        :param metric: only jobs whose stats JSON contains this key
        :param limit: maximum number of rows to return
        :return: list of job dicts; empty list on any failure
        """
        if not self.connection or not self.connection.is_connected():
            if not self.connect():
                return []
        
        cursor = None  # initialized first so the finally block is safe even if cursor() raises
        try:
            cursor = self.connection.cursor(dictionary=True)
            
            # Time window (unix_cutoff is an int, safe to inline in SQL)
            cutoff_time = datetime.now() - timedelta(hours=hours)
            unix_cutoff = int(cutoff_time.timestamp())
            
            # Build WHERE conditions; 'j' is the JSON document column, so
            # j.suite etc. are JSON field lookups.
            conditions = [f"submit_time >= {unix_cutoff}"]
            params = []
            
            if suite:
                conditions.append("j.suite = %s")
                params.append(suite)
            
            if testbox:
                conditions.append("j.testbox = %s")
                params.append(testbox)
            
            if status:
                conditions.append("j.job_health = %s")
                params.append(status)
            
            if commit:
                # BUGFIX: parenthesized so the OR does not swallow the other
                # AND-joined conditions (AND binds tighter than OR in SQL).
                conditions.append("(j.ss.linux.commit= %s OR j.program.makepkg.commit=%s)")
                params.append(f"{commit}")
                params.append(f"{commit}")
            
            # Batch-submitted jobs are expected to carry program/ss metadata
            conditions.append("(j.program IS NOT NULL OR j.ss IS NOT NULL)")
            
            if metric:
                conditions.append("JSON_CONTAINS_PATH(stats, 'one', %s) = 1")
                params.append(f"$.{metric}")
            
            where_clause = " AND ".join(conditions)
            
            query = f"""
                SELECT id as job_id, submit_time, j.result_root as result_root, j, j.suite as suite, j.testbox as testbox, j.job_health as job_health, j.job_stage as job_stage, full_text_kv
                FROM jobs
                WHERE {where_clause}
                ORDER BY submit_time DESC
                LIMIT %s
                OPTION max_matches = 10000
            """
            params.append(limit)
            
            logger.debug(f"Executing query: {query} {params}")
            cursor.execute(query, params)
            results = cursor.fetchall()
            
            # Convert timestamp and enrich each row from its JSON document
            for job in results:
                job['job_id'] = str(job['job_id'])  # Ensure job_id is string
                job['submit_time'] = datetime.fromtimestamp(job['submit_time']).strftime('%Y-%m-%d %H:%M:%S')
                
                job_data = json.loads(job['j']) if isinstance(job['j'], str) else job['j']
                job['commit'] = self._extract_commit(job_data)
                job['host_machine'] = job_data.get('host_machine')
                job['job_data_readiness'] = job_data.get('job_data_readiness', 'N/A')
                
                # Extract Git repo and unixbench test from full_text_kv
                full_text_kv = job.get('full_text_kv', '')
                job['git_repo'] = self._extract_github_repo(full_text_kv)
                job['unixbench_test'] = self._extract_unixbench_test(full_text_kv)
            
            return results
            
        except mysql.connector.Error as err:
            logger.error(f"Query failed: {err}")
            return []
        except json.JSONDecodeError as err:
            logger.error(f"JSON decode failed: {err}")
            return []
        finally:
            if cursor is not None:
                cursor.close()
    
    def _extract_commit(self, job_data):
        """从任务数据中提取提交哈希"""
        try:
            # 尝试从ss字段提取
            if job_data.get('ss') and job_data['ss'].get('linux') and job_data['ss']['linux'].get('commit'):
                return job_data['ss']['linux']['commit']
            
            # 尝试从program字段提取
            if job_data.get('program') and job_data['program'].get('makepkg') and job_data['program']['makepkg'].get('commit'):
                return job_data['program']['makepkg']['commit']
            
            # 尝试从program的其他子字段提取
            for prog in job_data.get('program', {}):
                if 'commit' in job_data['program'][prog]:
                    return job_data['program'][prog]['commit']
            
            return None
        except:
            return None
    
    def _extract_github_repo(self, full_text_kv):
        """从 full_text_kv 中提取 Git 仓库名"""
        if not full_text_kv:
            return None
        
        try:
            import re
            # 查找 ss.linux._url= 或 pp.makepkg._url= 模式
            url_match = re.search(r'(?:ss\.linux\._url|pp\.makepkg\._url)=([^\s]+)', full_text_kv)
            if url_match:
                url = url_match.group(1)
                # 从 URL 中提取仓库名
                # 支持格式: git+https://mirrors.tuna.tsinghua.edu.cn/git/linux-stable.git
                # 或者: https://github.com/user/repo.git
                repo_match = re.search(r'/([^/]+?)(?:\.git)?/?$', url)
                if repo_match:
                    return repo_match.group(1)
            return None
        except:
            return None
    
    def _extract_unixbench_test(self, full_text_kv):
        """从 full_text_kv 中提取 unixbench 测试项"""
        if not full_text_kv:
            return None
        
        try:
            import re
            # 查找 pp.unixbench.test= 模式
            test_match = re.search(r'pp\.unixbench\.test=([^\s]+)', full_text_kv)
            if test_match:
                return test_match.group(1)
            return None
        except:
            return None
    
    def query_jobs_by_commits(self, head_commit, base_commit, hours=168, suite=None, testbox=None, limit=1000):
        """
        查询两个不同 commit 的任务数据用于对比
        
        :param head_commit: head commit 标识
        :param base_commit: base commit 标识  
        :param hours: 查询最近多少小时内的任务
        :param suite: 筛选特定的测试套件
        :param testbox: 筛选特定的测试机类型
        :param limit: 返回结果数量限制
        :return: (head_jobs, base_jobs) 元组
        """
        head_jobs = self.query_jobs(hours=hours, suite=suite, testbox=testbox, 
                                  commit=head_commit, limit=limit)
        base_jobs = self.query_jobs(hours=hours, suite=suite, testbox=testbox,
                                  commit=base_commit, limit=limit)
        
        return head_jobs, base_jobs
    
    def _group_jobs_for_comparison(self, jobs):
        """按 git_repo + unixbench_test + testbox 分组任务"""
        from collections import defaultdict
        groups = defaultdict(list)
        
        for job in jobs:
            if job.get('job_health') != 'success' or job.get('job_stage') != 'finish':
                continue  # 只处理成功完成的任务
                
            git_repo = job.get('git_repo')
            unixbench_test = job.get('unixbench_test') 
            testbox = job.get('testbox')
            
            # 对于 unixbench 套件，需要有具体的测试项
            if job.get('suite') == 'unixbench' and unixbench_test:
                group_key = (git_repo, unixbench_test, testbox)
                groups[group_key].append(job)
            # 对于其他套件，直接使用 suite 作为测试项
            elif job.get('suite') != 'unixbench':
                group_key = (git_repo, job.get('suite'), testbox)
                groups[group_key].append(job)
                
        return groups
    
    def _call_compare_command(self, head_job_ids, base_job_ids):
        """Run the external compare tool on two job-id groups; return its stdout or None."""
        try:
            import subprocess
            import os
            
            # Locate the compare script (CCI_SRC env var may override the default)
            cci_src = os.getenv('CCI_SRC', '/c/lkp-tests')
            cmd = [
                f"{cci_src}/sbin/compare",
                f"id={','.join(head_job_ids)}",
                f"id={','.join(base_job_ids)}",
            ]
            
            logger.debug(f"Executing compare command: {' '.join(cmd)}")
            
            # list-argv, no shell; capped at 5 minutes
            proc = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
            
            if proc.returncode != 0:
                logger.error(f"Compare command failed: {proc.stderr}")
                return None
            return proc.stdout
                
        except subprocess.TimeoutExpired:
            logger.error("Compare command timed out")
            return None
        except Exception as e:
            logger.error(f"Error calling compare command: {str(e)}")
            return None
    
    def _parse_compare_output(self, compare_output):
        """解析 compare 命令的输出，提取 unixbench 相关指标"""
        if not compare_output:
            return []
        
        results = []
        lines = compare_output.strip().split('\n')
        
        # 跳过表头，找到数据行
        data_started = False
        for line in lines:
            line = line.strip()
            if not line:
                continue
                
            # 检查是否是表头分隔符
            if '----' in line and not data_started:
                data_started = True
                continue
                
            if not data_started:
                continue
                
            # 解析数据行
            if line.startswith('unixbench.'):
                parts = line.split()
                if len(parts) >= 4:
                    metric_name = parts[3]  # unixbench.xxx
                    
                    # 解析数值和变化
                    try:
                        base_value = float(parts[0])
                        change_str = parts[1]  # +138.8% 格式
                        head_value = float(parts[2])
                        
                        # 解析变化百分比
                        if '+' in change_str or '-' in change_str:
                            change_percent = change_str.replace('%', '')
                            change_value = head_value - base_value
                        else:
                            change_percent = "0.0"
                            change_value = 0.0
                            
                        results.append({
                            'metric': metric_name,
                            'base_value': base_value,
                            'head_value': head_value, 
                            'change_value': change_value,
                            'change_percent': change_percent
                        })
                    except (ValueError, IndexError) as e:
                        logger.debug(f"Failed to parse compare line: {line} - {e}")
                        continue
        
        return results
    
    def perform_commit_comparison(self, head_commit, base_commit, hours=168, suite=None, testbox=None):
        """Compare head vs base commit jobs and return per-metric result rows."""
        logger.info(f"Comparing {head_commit} vs {base_commit}")
        
        head_jobs, base_jobs = self.query_jobs_by_commits(
            head_commit, base_commit, hours, suite, testbox
        )
        
        # Bail out early when either side has no data at all
        if not head_jobs:
            logger.warning(f"No jobs found for head commit: {head_commit}")
            return []
        if not base_jobs:
            logger.warning(f"No jobs found for base commit: {base_commit}")
            return []
        
        head_groups = self._group_jobs_for_comparison(head_jobs)
        base_groups = self._group_jobs_for_comparison(base_jobs)
        
        comparison_results = []
        
        # Only groups present on BOTH sides (same repo + test + testbox) compare
        for group_key in set(head_groups.keys()) & set(base_groups.keys()):
            git_repo, test_item, testbox_name = group_key
            
            # Cap each side at 3 jobs per group
            head_job_ids = [j['job_id'] for j in head_groups[group_key][:3]]
            base_job_ids = [j['job_id'] for j in base_groups[group_key][:3]]
            if not (head_job_ids and base_job_ids):
                continue
            
            logger.info(f"Comparing {git_repo}/{test_item} on {testbox_name}: "
                       f"{len(head_job_ids)} head jobs vs {len(base_job_ids)} base jobs")
            
            compare_output = self._call_compare_command(head_job_ids, base_job_ids)
            if not compare_output:
                logger.warning(f"Failed to get compare results for {group_key}")
                continue
            
            # One output row per parsed metric, annotated with group metadata
            for metric in self._parse_compare_output(compare_output):
                comparison_results.append({
                    'git_repo': git_repo or 'N/A',
                    'test_item': test_item or 'N/A', 
                    'testbox': testbox_name or 'N/A',
                    'head_commit': head_commit,
                    'base_commit': base_commit,
                    'metric': metric['metric'],
                    'head_value': metric['head_value'],
                    'base_value': metric['base_value'],
                    'change_value': metric['change_value'],
                    'change_percent': metric['change_percent'],
                    'head_job_count': len(head_job_ids),
                    'base_job_count': len(base_job_ids)
                })
        
        return comparison_results
    
    def format_comparison_table(self, comparison_results):
        """Render comparison result rows as an ASCII grid table."""
        if not comparison_results:
            return "No comparison results to display"
        
        headers = [
            "Git Repo", "Test Item", "Testbox", "Metric", 
            "Head Commit", "Base Commit", "Head Value", "Base Value", 
            "Change", "Change %", "Jobs (H/B)"
        ]
        
        # Pre-format every cell (values truncated / numbers rendered)
        rows = [
            [
                r['git_repo'][:15],
                r['test_item'][:20],
                r['testbox'][:12],
                r['metric'].replace('unixbench.', '')[:25],  # drop common prefix
                r['head_commit'][:10],
                r['base_commit'][:10],
                f"{r['head_value']:.2f}",
                f"{r['base_value']:.2f}",
                f"{r['change_value']:+.2f}",
                f"{r['change_percent']}%",
                f"{r['head_job_count']}/{r['base_job_count']}",
            ]
            for r in comparison_results
        ]
        
        # Each column is as wide as its widest cell (header included)
        col_widths = [
            max(len(str(cell)) for cell in column)
            for column in zip(headers, *rows)
        ]
        
        def render(cells):
            return "|" + "|".join(
                f" {cell:<{width}} " for cell, width in zip(cells, col_widths)
            ) + "|"
        
        separator = "+" + "+".join("-" * (w + 2) for w in col_widths) + "+"
        
        table_lines = [separator, render(headers), separator]
        table_lines.extend(render(row) for row in rows)
        table_lines.append(separator)
        
        return "\n".join(table_lines)
    
    def analyze_results(self, jobs):
        """Aggregate job records into distribution/summary statistics.

        Returns None for an empty job list. earliest/latest rely on the
        'YYYY-mm-dd HH:MM:SS' submit_time strings sorting chronologically.
        """
        if not jobs:
            return None
        
        submit_times = [j['submit_time'] for j in jobs]
        analysis = {
            'total': len(jobs),
            'job_health_distribution': defaultdict(int),
            'suite_distribution': defaultdict(int),
            'testbox_distribution': defaultdict(int),
            'commit_distribution': defaultdict(int),
            'metrics': set(),
            'job_stage_distribution': defaultdict(int),
            'job_stage_when_health_none': defaultdict(int),
            'finish_fail_count': 0,
            'finish_fail_ids': [],   # sampled job_ids (max 10) for finish+fail
            'finish_abort_count': 0,
            'finish_abort_ids': [],  # sampled job_ids (max 10) for finish+abort
            'data_readiness_distribution': defaultdict(int),
            'earliest_submit': min(submit_times),
            'latest_submit': max(submit_times)
        }
        
        for job in jobs:
            health = job.get('job_health')
            stage = job.get('job_stage')
            
            analysis['job_health_distribution'][job['job_health']] += 1
            analysis['suite_distribution'][job['suite']] += 1
            analysis['testbox_distribution'][job['testbox']] += 1
            
            commit = job['commit']
            if commit:
                analysis['commit_distribution'][commit[:7]] += 1
            
            analysis['job_stage_distribution'][stage] += 1
            if health is None:
                analysis['job_stage_when_health_none'][stage] += 1
            
            # Track the finish+fail / finish+abort combinations
            if stage == 'finish' and health == 'fail':
                analysis['finish_fail_count'] += 1
                if len(analysis['finish_fail_ids']) < 10:
                    analysis['finish_fail_ids'].append(job['job_id'])
            if stage == 'finish' and health == 'abort':
                analysis['finish_abort_count'] += 1
                if len(analysis['finish_abort_ids']) < 10:
                    analysis['finish_abort_ids'].append(job['job_id'])
            
            # Metric names come from the per-job stats JSON (if present)
            stats = job.get('stats', {})
            if isinstance(stats, str):
                try:
                    stats = json.loads(stats)
                except:
                    stats = {}
            if isinstance(stats, dict):
                analysis['metrics'].update(stats.keys())
            
            analysis['data_readiness_distribution'][job.get('job_data_readiness', 'N/A')] += 1
        
        # Normalize the headline distributions to plain dicts
        for key in ('job_health_distribution', 'suite_distribution',
                    'testbox_distribution', 'commit_distribution'):
            analysis[key] = dict(analysis[key])
        analysis['metrics'] = sorted(analysis['metrics'])
        
        return analysis
    
    def format_analysis(self, analysis):
        """Render an analysis dict (from analyze_results) as a text report."""
        if not analysis:
            return "无分析结果"
        
        total = analysis['total']
        output = [
            "=" * 80,
            "Batch Job Analysis Report",
            "=" * 80,
            f"Total Jobs: {total}",
            f"(Note: Analysis includes only the latest {total} records, limited by --limit parameter)",
            f"Time Range: {analysis['earliest_submit']} to {analysis['latest_submit']}",
            "",
            "job_health 分布:"
        ]
        
        # Health distribution with percentages
        output.extend(
            f"  {format_health(status)}: {count} ({count/total*100:.1f}%)"
            for status, count in analysis['job_health_distribution'].items()
        )

        # finish+fail / finish+abort combinations with sampled job ids
        output.append(f"\nJobs with job_stage=finish and job_health=fail: {analysis['finish_fail_count']}")
        if analysis['finish_fail_ids']:
            output.append(f"  前 {len(analysis['finish_fail_ids'])} 个 job_id: {', '.join(analysis['finish_fail_ids'])}")
        else:
            output.append("  无相关任务")
        
        output.append(f"Jobs with job_stage=finish and job_health=abort: {analysis['finish_abort_count']}")
        if analysis['finish_abort_ids']:
            output.append(f"  前 {len(analysis['finish_abort_ids'])} 个 job_id: {', '.join(analysis['finish_abort_ids'])}")
        else:
            output.append("  无相关任务")

        # Stage distribution
        output.append("\nJob Stage 分布:")
        output.extend(
            f"  {format_stage(stage)}: {count}"
            for stage, count in analysis['job_stage_distribution'].items()
        )

        # Stage distribution restricted to jobs whose health is still None
        if analysis.get('job_stage_when_health_none'):
            output.append("\njob_health=None 时的 job_stage 分布:")
            output.extend(
                f"  {format_stage(stage)}: {count}"
                for stage, count in analysis['job_stage_when_health_none'].items()
            )
        
        # Suite and testbox distributions
        output.append("\n套件分布:")
        output.extend(f"  {suite}: {count}" for suite, count in analysis['suite_distribution'].items())
        
        output.append("\n测试机分布:")
        output.extend(f"  {testbox}: {count}" for testbox, count in analysis['testbox_distribution'].items())
        
        output.append("=" * 80)
        return "\n".join(output)
    
    def export_to_csv(self, jobs, filename):
        """Export job rows to a CSV file.

        :param jobs: list of job dicts (as produced by query_jobs)
        :param filename: target CSV path
        :return: True on success, False on failure or empty input
        """
        if not jobs:
            logger.warning("No jobs to export")
            return False
        
        try:
            with open(filename, 'w', newline='', encoding='utf-8') as f:
                fieldnames = ['job_id', 'suite', 'testbox', 'job_health', 'job_stage',
                            'submit_time', 'commit', 'host_machine', 'metrics_count', 'error_summary']
                
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()
                
                for job in jobs:
                    # stats may arrive as a JSON string; count its keys
                    stats = job.get('stats', {})
                    if isinstance(stats, str):
                        try:
                            stats = json.loads(stats)
                        except json.JSONDecodeError:  # was a bare except
                            stats = {}
                    metrics_count = len(stats) if isinstance(stats, dict) else 0
                    
                    # error summary may be a list; flatten to one string
                    error_summary = job.get('errors', '')
                    if isinstance(error_summary, list):
                        error_summary = "; ".join(error_summary)
                    
                    writer.writerow({
                        'job_id': job['job_id'],
                        'suite': job['suite'],
                        'testbox': job['testbox'],
                        'job_health': job['job_health'],
                        'job_stage': job.get('job_stage', ''),
                        'submit_time': job['submit_time'],
                        'commit': job.get('commit', ''),
                        'host_machine': job.get('host_machine', ''),
                        'metrics_count': metrics_count,
                        'error_summary': error_summary
                    })
            
            # BUGFIX: log the actual target file instead of the literal "(unknown)"
            logger.info(f"Results exported to: {filename}")
            return True
        except Exception as e:
            logger.error(f"Export failed: {str(e)}")
            return False
    
    def export_metrics(self, jobs, filename):
        """Export per-job metrics (stats) to a JSON file.

        Jobs without any stats are omitted from the output entirely.

        :param jobs: list of job dicts (as produced by query_jobs)
        :param filename: target JSON path
        :return: True on success, False on failure or empty input
        """
        if not jobs:
            logger.warning("No jobs to export")
            return False
        
        try:
            metrics_data = []
            
            for job in jobs:
                # stats may arrive as a JSON string; decode before use
                stats = job.get('stats', {})
                if isinstance(stats, str):
                    try:
                        stats = json.loads(stats)
                    except json.JSONDecodeError:  # was a bare except
                        stats = {}
                
                if not stats:
                    continue
                
                metrics_data.append({
                    'job_id': job['job_id'],
                    'suite': job['suite'],
                    'testbox': job['testbox'],
                    'job_health': job['job_health'],
                    'job_stage': job.get('job_stage', ''),
                    'submit_time': job['submit_time'],
                    'commit': job.get('commit', ''),
                    'host_machine': job.get('host_machine', ''),
                    'metrics': stats
                })
            
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(metrics_data, f, indent=2, ensure_ascii=False)
            
            # BUGFIX: log the actual target file instead of the literal "(unknown)"
            logger.info(f"Metrics data exported to: {filename}")
            return True
        except Exception as e:
            logger.error(f"Metrics export failed: {str(e)}")
            return False

    def search_scheduler_logs(self, job_id: str, log_path: str, context_lines: int = 5) -> str:
        """Scan scheduler log file(s) for ERROR lines near mentions of a job id.

        For every line matching 'job_id=<job_id>', the next 4 lines are
        inspected; if any contains "ERROR", the match plus its context is
        included in the report. Always returns a human-readable string,
        never raises.

        NOTE(review): the context_lines parameter is currently unused — the
        look-ahead window is hard-coded to 4 lines below.
        """
        try:
            import re
            job_id = str(job_id)
            # Build an exact-match pattern for the 'job_id=<value>' form
            job_id_pattern = re.compile(
                r'job_id\s*=\s*' + re.escape(job_id) +  # matches job_id=value
                r'([^a-zA-Z0-9]|$)'  # the id must be followed by a non-alphanumeric char or end of line
            )
            
            if not os.path.exists(log_path):
                return f"Log path does not exist: {log_path}"
            
            # Collect candidate log files
            log_files = []
            if os.path.isfile(log_path):
                log_files = [log_path]
            elif os.path.isdir(log_path):
                # Gather all .log files, sorted by modification time (newest first)
                all_logs = [f for f in os.listdir(log_path) 
                            if os.path.isfile(os.path.join(log_path, f)) 
                            and f.endswith('.log')]
                all_logs.sort(key=lambda f: os.path.getmtime(os.path.join(log_path, f)), 
                             reverse=True)
                log_files = [os.path.join(log_path, f) for f in all_logs[:3]]  # keep only the 3 newest files
            
            if not log_files:
                return f"No log files found: {log_path}"
            
            results = []
            any_errors_found = False  # whether an ERROR was found in ANY file
            
            for log_file in log_files:
                try:
                    file_size = os.path.getsize(log_file)
                    if file_size > 100 * 1024 * 1024:  # 100MB
                        results.append(f"Skipping large file: {os.path.basename(log_file)} ({file_size//1024//1024}MB)")
                        continue
                    
                    # Scan the whole file with the compiled job-id pattern
                    with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
                        lines = f.readlines()
                        total_lines = len(lines)
                        
                        for i, line in enumerate(lines):
                            # Does this line mention the target job_id?
                            if job_id_pattern.search(line):
                                context_block = []
                                # Include the matching line itself
                                context_block.append(f">>> {line.strip()}")
                                
                                # Look for ERROR within the following 4 lines
                                has_error = False
                                for j in range(1, min(5, total_lines-i)):
                                    next_line = lines[i+j].strip()
                                    context_block.append(f"    {next_line}")
                                    
                                    # Only ERROR counts (WARN is ignored)
                                    if "ERROR" in next_line:
                                        has_error = True
                                        any_errors_found = True  # remember that we found one
                                
                                if has_error:
                                    results.append(f"File: {os.path.basename(log_file)}")
                                    results.append(f"Error found after job ID at line {i+1}:")
                                    results.extend(context_block)
                                    results.append('-' * 40)
                    
                except Exception as e:
                    results.append(f"Error reading {log_file}: {str(e)}")
            
            # Only report "no ERROR" when there were notes (e.g. skipped files)
            # but no file actually yielded an error hit
            if not any_errors_found and results:
                results.append(f"No ERROR found for job ID: {job_id}")
            
            if results:
                return '\n'.join(results)
            return f"No relevant logs found for job ID: {job_id}"
        
        except Exception as e:
            return f"Log search failed: {str(e)}"
        

def _build_parser():
    """Build the command-line parser for the batch job query tool."""
    parser = argparse.ArgumentParser(
        description='Batch query and analyze jobs submitted by linux_auto_test.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    # Query parameters
    parser.add_argument('--hours', type=int, default=24,
                        help='Query jobs from the last N hours')
    parser.add_argument('--suite',
                        help='Filter by specific test suite (e.g., unixbench, ltp)')
    parser.add_argument('--testbox',
                        help='Filter by specific testbox type (e.g., vm-2p8g)')
    parser.add_argument('--status', choices=['good', 'bad', 'skip', 'error'],
                        help='Filter by job status')
    parser.add_argument('--commit',
                        help='Filter by specific commit hash (prefix matching supported)')
    parser.add_argument('--metric',
                        help='Filter jobs containing a specific metric')
    parser.add_argument('--limit', type=int, default=1000,
                        help='Maximum number of results to return')

    # Output options
    parser.add_argument('--show-jobs', action='store_true',
                        help='Display job details')
    parser.add_argument('--show-stats', action='store_true',
                        help='Display job metrics data')
    parser.add_argument('--export-csv',
                        help='Export results to CSV file')
    parser.add_argument('--export-metrics',
                        help='Export all metrics data to JSON file')

    # Debug options
    parser.add_argument('--debug', action='store_true',
                        help='Enable debug mode')

    # Job filtering parameters
    parser.add_argument('--job-stage',
                        help='Only show jobs with specific job_stage (e.g., finish, submit, abort_provider)')
    parser.add_argument('--job-health',
                        help='Only show jobs with specific job_health (e.g., fail, success, abort_invalid, none)')

    # Scheduler log parameters
    parser.add_argument('--scheduler-log',
                        help='Path to scheduler logs (file or directory)')
    parser.add_argument('--log-context', type=int, default=5,
                        help='Number of context lines to show (default 5)')
    parser.add_argument('--show-logs', action='store_true',
                        help='显示调度器日志（需要配合--scheduler-log参数）')

    # Commit comparison parameters
    parser.add_argument('--compare-commits', action='store_true',
                        help='Compare performance between two commits')
    parser.add_argument('--head-commit',
                        help='Head commit for comparison (e.g., v6.16, abc123)')
    parser.add_argument('--base-commit',
                        help='Base commit for comparison (e.g., v6.15, def456)')

    return parser


def _run_commit_comparison(query_tool, args):
    """Run the head-vs-base commit comparison and print a result table."""
    logger.info("Performing commit comparison...")
    comparison_results = query_tool.perform_commit_comparison(
        args.head_commit, args.base_commit,
        hours=args.hours,
        suite=args.suite,
        testbox=args.testbox
    )

    if comparison_results:
        print("\n" + "=" * 100)
        print("COMMIT COMPARISON RESULTS")
        print("=" * 100)
        print(query_tool.format_comparison_table(comparison_results))
        print(f"\nTotal comparisons: {len(comparison_results)}")
    else:
        logger.warning("No comparison results generated")


def _attach_error_summaries(query_tool, jobs, args):
    """Search scheduler logs per job and store cleaned ERROR lines in job['errors'].

    Each summary line has its timestamp, the literal "ERROR" token, and the
    context markers (">>>", indentation) stripped out.
    """
    import re  # local import: only this helper needs the regex engine
    # Matches timestamps such as "2024-01-02 03:04:05+0800"
    timestamp_pattern = re.compile(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}[\+\-]\d{4}')

    for job in jobs:
        log_result = query_tool.search_scheduler_logs(
            job_id=job['job_id'],
            log_path=args.scheduler_log,
            context_lines=args.log_context
        )
        error_lines = []
        for line in log_result.split('\n'):
            if "ERROR" not in line:
                continue
            # Strip timestamp and level token, then context markers
            clean_message = timestamp_pattern.sub('', line)
            clean_message = clean_message.replace("ERROR", "").strip()
            clean_message = clean_message.replace(">>>", "").replace("    ", "").strip()
            if clean_message:  # keep only non-empty summaries
                error_lines.append(clean_message)
        job['errors'] = error_lines


def _matches_name_or_id(value, target, formatter):
    """Return True when *value* matches *target* by equality or formatted prefix.

    Accepts either the bare name/number (string comparison) or a prefix of the
    "name(id)" form produced by *formatter* (format_stage / format_health).
    """
    if str(value) == target or value == target:
        return True
    return bool(value) and formatter(value).startswith(target)


def _job_passes_filters(job, args):
    """Apply the --job-stage / --job-health display filters to one job."""
    if args.job_stage and not _matches_name_or_id(
            job.get('job_stage'), args.job_stage, format_stage):
        return False
    if args.job_health:
        if args.job_health.lower() == 'none':
            # 'none' selects jobs whose health field is unset
            return job.get('job_health') is None
        if not _matches_name_or_id(
                job.get('job_health'), args.job_health, format_health):
            return False
    return True


def _parse_stats(raw):
    """Decode a job's stats field; tolerate malformed JSON by returning {}."""
    if isinstance(raw, str):
        try:
            return json.loads(raw)
        except (ValueError, TypeError):  # narrow: bad JSON only, not KeyboardInterrupt etc.
            return {}
    return raw or {}


def _print_job(job, args, query_tool):
    """Print one job's details, optional error summary, logs, and metrics."""
    print(f"job_id: {job['job_id']}")
    print(f"suite: {job['suite']}")
    print(f"testbox: {job['testbox']}")
    print(f"job_health: {format_health(job['job_health'])}")
    if 'job_stage' in job:
        print(f"job_stage: {format_stage(job['job_stage'])}")
    print(f"submit_time: {job['submit_time']}")
    for key in ('commit', 'host_machine', 'git_repo', 'unixbench_test'):
        if job.get(key):
            print(f"{key}: {job[key]}")

    # Error summary extracted earlier by _attach_error_summaries (if any)
    if job.get('errors'):
        print("  错误摘要:")
        for error in job['errors']:
            print(f"    - {error}")

    # Show scheduler logs when requested, or automatically for jobs whose
    # health is still unset (likely stuck/aborted jobs).
    show_logs = bool(args.scheduler_log) and (args.show_logs or job.get('job_health') is None)
    if show_logs:
        log_result = query_tool.search_scheduler_logs(
            job_id=job['job_id'],
            log_path=args.scheduler_log,
            context_lines=args.log_context
        )
        print("  调度器日志:")
        for line in log_result.split('\n'):
            print(f"    {line}")

    # Print metrics exactly once. The original code printed this section a
    # second time inside the log branch, duplicating output when both
    # --show-stats and the log-display condition held.
    if args.show_stats or show_logs:
        stats = _parse_stats(job.get('stats', {}))
        if stats:
            print("  指标数据:")
            for metric, value in stats.items():
                print(f"    {metric}: {value}")
    print("-" * 60)


def main():
    """CLI entry point: parse arguments, query jobs, then analyze/print/export.

    Modes:
      * --compare-commits: head-vs-base performance comparison, then exit.
      * default: query recent jobs, optionally annotate with scheduler-log
        error summaries, print analysis/details, and export CSV/JSON.
    """
    args = _build_parser().parse_args()

    if args.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug("Debug mode enabled")

    query_tool = BatchJobQuery()

    # Comparison mode is exclusive: run it and exit
    if args.compare_commits:
        if not args.head_commit or not args.base_commit:
            logger.error("--compare-commits requires both --head-commit and --base-commit")
            return
        _run_commit_comparison(query_tool, args)
        query_tool.disconnect()
        return

    # Normal query path
    jobs = query_tool.query_jobs(
        hours=args.hours,
        suite=args.suite,
        testbox=args.testbox,
        status=args.status,
        commit=args.commit,
        metric=args.metric,
        limit=args.limit
    )

    # Extract per-job error summaries from the scheduler logs
    if args.scheduler_log:
        _attach_error_summaries(query_tool, jobs, args)

    if not jobs:
        logger.info("No matching jobs found")
        return

    logger.info(f"Found {len(jobs)} matching jobs")

    # Aggregate analysis
    analysis = query_tool.analyze_results(jobs)
    print(query_tool.format_analysis(analysis))

    # Per-job details (honoring --job-stage / --job-health filters)
    if args.show_jobs:
        print("\nJob Details:")
        for job in jobs:
            if not _job_passes_filters(job, args):
                continue
            _print_job(job, args, query_tool)

    # Exports
    if args.export_csv:
        query_tool.export_to_csv(jobs, args.export_csv)
    if args.export_metrics:
        query_tool.export_metrics(jobs, args.export_metrics)

    query_tool.disconnect()

if __name__ == "__main__":
    main()
