#!/usr/bin/env python3
# SPDX-License-Identifier: MulanPSL-2.0+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.

import time
import json
import os
import re
import subprocess
import yaml
import numpy as np
import sys
import random
import uuid
import threading
from typing import Callable, Any, List, Dict, Optional, Tuple
import atexit
import shutil
from functools import wraps
import traceback
from datetime import datetime
import inspect
import copy
import select
import statistics
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from enum import Enum

from bisect_config import BisectConfig
from bisect_log_config import logger, get_logger
from perf_range import Range
from job_md5_utils import calculate_all_params_md5

from bisect_db_http import BisectDBHttp as BisectDB
from bisect_visualizer import BisectVisualizer

# Custom exception class definitions
class BisectError(Exception):
    """Base exception class for bisect operations."""
    pass

class JobSubmissionError(Exception):
    """Raised when submitting a job to the scheduler fails."""
    pass

class JobStatusError(Exception):
    """Common base for errors raised while querying job status."""
    pass

class JobStatusTimeoutError(JobStatusError):
    """Raised when polling a job's status exceeds the allowed time."""
    pass

class TemporarySystemError(Exception):
    """Temporary system error; the failed operation may be retried."""
    pass

class VerificationError(BisectError):
    """Errors specific to the verification phase of a bisect run."""
    pass

class InsufficientSamplesError(VerificationError):
    """Raised when too few performance samples could be collected."""
    pass

class VerificationTimeoutError(VerificationError):
    """Raised when the verification phase exceeds its time budget."""
    pass

# Retryable error types: transient failures that merit another attempt
RETRYABLE_ERRORS = (
    JobSubmissionError,        # job submission failed
    JobStatusTimeoutError,     # status polling timed out
    ConnectionError,           # network connectivity problem
    subprocess.TimeoutExpired, # subprocess timed out
    TemporarySystemError      # custom temporary system error
)



class GitBisect:
    def __init__(self, logger=None):
        """
        Initialize a GitBisect instance.

        Args:
            logger: Optional logger instance. If provided, its session_id is
                    reused (dependency injection); otherwise a new session_id
                    and a session-specific logger are created.

        Raises:
            ValueError: when the provided logger has no valid 'session_id'
            TypeError: when the provided logger lacks a required method
        """
        # Validate configuration first
        self._validate_config()

        if logger:
            # Verify the injected logger exposes every method this class relies on
            # (collapsed from three copy-pasted hasattr/callable checks)
            for required in ('info', 'error', 'exception'):
                if not callable(getattr(logger, required, None)):
                    raise TypeError(f"Provided logger must have callable '{required}' method")

            # Dependency injection: reuse the caller's logger and its session_id
            self.logger = logger
            self.session_id = getattr(logger, 'session_id', None)
            if not self.session_id:
                raise ValueError("Provided logger must have a valid 'session_id' attribute")

            self.logger.debug(f"GitBisect initialized with injected logger, session_id={self.session_id}")
        else:
            # Fresh session: derive a session_id from the thread id plus a timestamp
            thread_id = threading.current_thread().ident
            thread_name = threading.current_thread().name
            timestamp = int(time.time() * 1000000)  # microsecond-resolution timestamp
            self.session_id = f"{thread_id % 0xFFFFFFFF:08x}_{timestamp % 0xFFFFFF:06x}"

            # Create a session-specific logger instance
            self.logger = get_logger(session_id=self.session_id)
            self.logger.debug(f"GitBisect created new session in thread_id={thread_id}, thread_name='{thread_name}', session_id={self.session_id}")

        # Initialize BisectDB with configuration taken from the environment
        self.bisect_db = BisectDB(
            host=os.environ.get('MANTICORE_HOST', 'localhost'),
            port=os.environ.get('MANTICORE_PORT', '9306'),
            database=os.environ.get('MANTICORE_DB', 'jobs')
        )
        self._is_external_repo = False  # whether work_dir points at an external repo

        self.bad_job = None
        self.bad_commit = None
        self.good_commit = None
        self.commit_field = None
        self.metric = None
        self.bad_metric_value = None
        self.perf_range = None
        self.mid_point = None  # performance split point between good/bad samples
        self.direction = None  # performance direction coefficient (1: higher is better, -1: lower is better)
        self.v1_samples = []   # performance samples collected for v1
        self.v2_samples = []   # performance samples collected for v2
        self.bisect_result_root = None
        self.bad_job_health = None  # original health status of the bad job
        self.bisect_start_time = None
        self.bisect_end_time = None
        self.analysis_complete = False
        self.is_build_task = False  # whether the job is a build/compile task

        # Instance-level path configuration
        self.temp_result_root = None
        self.temp_git_base = None

        # Recursion protection flags
        self._in_perf_range_calculation = False
        self._submitting_job = False

    def _validate_config(self):
        """Validate configuration values, raising ValueError on the first violation."""
        def require(condition, message):
            # Small local guard: raise when a configuration constraint fails
            if not condition:
                raise ValueError(message)

        require(BisectConfig.PERF_VERIFICATION_SAMPLES >= 1,
                "PERF_VERIFICATION_SAMPLES must be at least 1")

        require(BisectConfig.FUNC_VERIFICATION_RUNS >= 1,
                "FUNC_VERIFICATION_RUNS must be at least 1")

        require(0 < BisectConfig.VERIFICATION_CONFIDENCE_LEVEL <= 1,
                "VERIFICATION_CONFIDENCE_LEVEL must be between 0 and 1")

        require(BisectConfig.SKIP_RESULT_MAX_CANDIDATES >= 1,
                "SKIP_RESULT_MAX_CANDIDATES must be at least 1")

        # Timeout comes from the environment rather than BisectConfig
        verification_timeout = int(os.environ.get('BISECT_VERIFICATION_TIMEOUT', '3600'))
        require(verification_timeout >= 60,
                "BISECT_VERIFICATION_TIMEOUT must be at least 60 seconds")



    def find_first_bad_commit(self, task: Dict[str, Any], repo_dir: Optional[str] = None) -> Optional[Dict[str, Any]]:
        """
        Find the first commit that introduced the change (the change point).

        Args:
            task: Task configuration dictionary
            repo_dir: Optional path to an external repository

        Returns:
            Dictionary containing the bisect result

        Raises:
            RuntimeError: when an unrecoverable error occurs during bisecting
        """
        try:
            # Unpack task parameters; several keys accept legacy aliases
            self.task = task
            self.error_id = task.get('error_id')
            self.bad_job_id = task.get('bad_job_id') or task.get('target_job_id')
            self.good_commit = task.get('good_commit') or task.get('start_commit') or task.get('baseline_commit')
            self.metric = task.get('bisect_metric')
            self.direction = task.get('direction')
            self.set_log(task.get('bisect_result_root'))
            self.analysis_complete = False

            self.repo_dir = repo_dir
            if repo_dir:
                # Check if it's a valid git repository (works for both bare and non-bare)
                try:
                    validate_cmd = ["git", "-C", repo_dir, "rev-parse", "--git-dir"]
                    result = subprocess.run(validate_cmd, capture_output=True, text=True)
                    if result.returncode != 0:
                        self.logger.warning(f"Invalid git repository: {repo_dir}")
                        self.repo_dir = None
                except Exception as e:
                    self.logger.warning(f"Failed to validate git repository {repo_dir}: {e}")
                    self.repo_dir = None

            self.set_bad_job()
            self.set_work_dir()

            # The two bisect modes (error-based and metric-based) are mutually exclusive
            if self.metric and self.error_id:
                raise ValueError("Cannot use both error_id and metric for bisecting")

            # Setup Phase: Find good commit and performance range
            if not self.good_commit:
                self.logger.info("Searching for good commit...")
                self.good_commit = self.find_good_commit()
                if not self.good_commit:
                    raise RuntimeError("Failed to find valid good commit")
            self.logger.info(f"Bisect range established: {self.good_commit[:8]}(start) → {self.bad_commit[:8]}(end)")

            if self.metric:
                # Performance mode: establish mid_point/direction before running bisect
                self.logger.info("Calculating performance change detection parameters...")
                auto_calc_direction = self.direction is None
                has_gap = self.check_performance_difference(
                    self.good_commit, self.bad_commit, self.metric, auto_calc_direction=auto_calc_direction
                )
                if not has_gap:
                    raise RuntimeError("Cannot bisect without clear performance difference")
                direction_desc = "higher values are better" if self.direction == 1 else "lower values are better"
                self.logger.info(f"Performance detection configured: mid_point={self.mid_point:.2f}, direction={self.direction} ({direction_desc})")

            # Execution Phase
            self.logger.info("Starting bisect process")
            try:
                bisect_result = self.run_bisect()
            except Exception as bisect_err:
                # Provide detailed context about bisect execution failure
                error_context = [
                    f"Bisect execution failed: {str(bisect_err)}",
                    f"Task configuration:",
                    f"  - bad_job_id: {self.bad_job_id}",
                    f"  - good_commit: {self.good_commit[:8] if self.good_commit else 'None'}",
                    f"  - bad_commit: {self.bad_commit[:8] if self.bad_commit else 'None'}",
                    f"  - work_dir: {self.work_dir}",
                ]
                if self.metric:
                    error_context.append(f"  - metric: {self.metric}")
                    error_context.append(f"  - mid_point: {self.mid_point}")
                    error_context.append(f"  - direction: {self.direction}")
                elif self.error_id:
                    error_context.append(f"  - error_id: {self.error_id}")

                self.logger.error("\n".join(error_context))
                raise

            if bisect_result:
                result = self.analyse_result(bisect_result)
                self.logger.info(f"Bisect completed successfully")
                self.logger.info(f"Change point: {result.get('change_point', result.get('first_bad_commit'))}")
                self.logger.info(f"Description: {result.get('change_description', 'N/A')}")

                # Generate and save visualizations after successful bisect
                viz_path = self._generate_and_save_visualizations(result)
                if viz_path:
                    result['visualization_path'] = viz_path
                    self.logger.info(f"Visualizations saved to: {viz_path}")

                self.cleanup_temp_files()
                return result
            else:
                # This should not happen now since run_bisect raises exception on empty result
                # But keep it as a safety net
                error_context = (
                    f"Bisect returned None/empty result unexpectedly\n"
                    f"Configuration: bad_job={self.bad_job_id}, "
                    f"range={self.good_commit[:8]}..{self.bad_commit[:8]}, "
                    f"mode={'performance' if self.metric else 'error'}"
                )
                self.logger.error(error_context)
                raise RuntimeError(f"Bisect process returned None/empty result. Check logs for details.")

        except Exception as e:
            # Enhanced error logging with full context
            error_summary = [
                "=" * 80,
                "BISECT TASK FAILED",
                "=" * 80,
                f"Error: {str(e)}",
                f"Error type: {type(e).__name__}",
                "",
                "Task Context:",
                f"  bad_job_id: {self.bad_job_id}",
                f"  error_id: {self.error_id if self.error_id else 'N/A'}",
                f"  metric: {self.metric if self.metric else 'N/A'}",
                f"  good_commit: {self.good_commit[:8] if self.good_commit else 'Not set'}",
                f"  bad_commit: {self.bad_commit[:8] if self.bad_commit else 'Not set'}",
                f"  work_dir: {self.work_dir if hasattr(self, 'work_dir') else 'Not set'}",
                f"  result_root: {self.temp_result_root if self.temp_result_root else 'Not set'}",
                "=" * 80,
            ]

            self.logger.error("\n".join(error_summary))
            self.logger.exception("Full exception traceback:")

            # Re-raise with more context in the message
            if isinstance(e, RuntimeError) and "Bisect" in str(e):
                # Already a well-formatted bisect error, re-raise as-is
                raise
            else:
                # Wrap other exceptions with context
                raise RuntimeError(
                    f"Bisect task failed: {str(e)}\n"
                    f"bad_job_id={self.bad_job_id}, "
                    f"range={self.good_commit[:8] if self.good_commit else 'N/A'}..{self.bad_commit[:8] if self.bad_commit else 'N/A'}, "
                    f"See logs for full details."
                ) from e

    def cleanup_temp_files(self) -> None:
        """
        清理临时文件，保留重要的结果和日志
        """
        if not self.temp_result_root or not os.path.exists(self.temp_result_root):
            return

        self.logger.info("Starting to clean up temporary files...")

        # 如果分析未完成，保留所有文件以便调试
        if not hasattr(self, 'analysis_complete') or not self.analysis_complete:
            self.logger.info("Analysis incomplete, keeping all temporary files for debugging")
            return

        try:
            # 清理不再需要的git仓库
            if os.path.exists(self.temp_git_base):
                current_work_dir = getattr(self, 'work_dir', None)

                # 删除内部Git仓库
                if (current_work_dir and 
                    os.path.exists(current_work_dir) and 
                    not self._is_external_repo and
                    os.path.exists(os.path.join(current_work_dir, '.git'))):
                    
                    self.logger.info(f"Deleting local git directory: {current_work_dir}")
                    try:
                        shutil.rmtree(current_work_dir, ignore_errors=True)
                        self.logger.info("Git directory deleted successfully")
                    except Exception as e:
                        self.logger.error(f"Failed to delete git directory: {str(e)}")

                # 删除其他git仓库
                for item in os.listdir(self.temp_git_base):
                    dir_path = os.path.join(self.temp_git_base, item)
                    if os.path.isdir(dir_path) and dir_path != current_work_dir:
                        if os.path.exists(os.path.join(dir_path, '.git')):
                            shutil.rmtree(dir_path)

            self.logger.info("Temporary file cleanup completed")

        except Exception as e:
            self.logger.error(f"Error cleaning up temporary files: {e}")

    def _extract_github_repo(self, full_text_kv):
        """从 full_text_kv 中提取 Git 仓库名和URL"""
        if not full_text_kv:
            return None, None

        try:
            # 查找 ss.linux._url= 或 pp.makepkg._url= 模式
            url_match = re.search(r'(?:ss.linux._url|pp.makepkg._url)=([^\s]+)', full_text_kv)
            if url_match:
                url = url_match.group(1)
                # 标准化URL格式 - 只处理 git+https，不处理原生 git://
                if url.startswith("git+https://"):
                    url = url.replace("git+https://", "https://", 1)
                
                # 从 URL 中提取仓库名
                # 支持格式: git+https://mirrors.tuna.tsinghua.edu.cn/git/linux-stable.git
                # 或者: https://github.com/user/repo.git
                repo_match = re.search(r'/([^/]+?)(?:\.git)?/?$', url)
                if repo_match:
                    repo_name = repo_match.group(1)
                    return repo_name, url
            return None, None
        except:
            return None, None

    def _save_complete_bisect_log(self, bisect_result: str):
        """Save a complete bisect log (configuration, git history, result) to the result root.

        Args:
            bisect_result: raw output text of the finished bisect run

        Never raises — failures are logged via self.logger.error.
        """
        try:
            # Capture the full `git bisect log` history
            git_bisect_log_cmd = ["git", "-C", self.work_dir, "bisect", "log"]
            git_bisect_log = subprocess.run(
                git_bisect_log_cmd, 
                capture_output=True, 
                text=True, 
                check=True
            ).stdout.strip()
            
            # Capture a brief `git log --oneline` history for context
            git_log_cmd = ["git", "-C", self.work_dir, "log", "--oneline", "-10"]
            git_log = subprocess.run(
                git_log_cmd,
                capture_output=True,
                text=True,
                check=True
            ).stdout.strip()
            
            # Assemble the complete log content
            complete_log = [
                "=" * 80,
                f"Git Bisect Complete Log - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
                "=" * 80,
                "",
                "## Bisect Configuration ##",
                f"Work Directory: {self.work_dir}",
                f"Bad Commit: {self.bad_commit}",
                f"Good Commit: {self.good_commit}",
                f"Bad Job ID: {self.bad_job_id}",
                f"Error ID: {self.error_id}" if self.error_id else f"Metric: {self.metric}",
                f"Start Time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.bisect_start_time)) if self.bisect_start_time else 'N/A'}",
                f"End Time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.bisect_end_time)) if self.bisect_end_time else 'N/A'}",
                "",
                "## Git Bisect Log ##",
                git_bisect_log,
                "",
                "## Recent Commits ##",
                git_log,
                "",
                "## Bisect Result ##",
                bisect_result,
                "",
                "=" * 80,
            ]
            
            # Persist to a file under the result root
            log_file_path = os.path.join(self.temp_result_root, 'complete_bisect.log')
            with open(log_file_path, 'w', encoding='utf-8') as f:
                f.write('\n'.join(complete_log))
            
            self.logger.info(f"Complete bisect log saved to: {log_file_path}")
            
            # Optionally echo to the console, controlled via environment variable
            if os.environ.get('BISECT_SHOW_COMPLETE_LOG', '').lower() in ('1', 'true', 'yes'):
                print('\n'.join(complete_log))
                
        except Exception as e:
            self.logger.error(f"Failed to save complete bisect log: {str(e)}")

    def _reset_repo_to_clean_state(self, repo_path: str) -> bool:
        """
        Reset repository to clean state before bisect

        Args:
            repo_path: Path to the git repository

        Returns:
            True if successful, False otherwise
        """
        try:
            # 1. Stop any ongoing bisect
            try:
                subprocess.run(
                    ['git', '-C', repo_path, 'bisect', 'reset'],
                    capture_output=True,
                    text=True,
                    timeout=30
                )
                self.logger.debug(f"Reset any ongoing bisect in {repo_path}")
            except subprocess.SubprocessError:
                # Ignore if no bisect is running
                pass

            # 2. Reset to HEAD, discarding all changes
            reset_cmd = ['git', '-C', repo_path, 'reset', '--hard', 'HEAD']
            result = subprocess.run(reset_cmd, capture_output=True, text=True, check=True, timeout=60)
            self.logger.info(f"Reset repository to clean state: {repo_path}")

            # 3. Clean untracked files and directories
            clean_cmd = ['git', '-C', repo_path, 'clean', '-fd']
            result = subprocess.run(clean_cmd, capture_output=True, text=True, check=True, timeout=60)
            self.logger.info(f"Cleaned untracked files in {repo_path}")

            return True

        except subprocess.TimeoutExpired as e:
            self.logger.error(f"Timeout while resetting repository: {repo_path}")
            return False
        except subprocess.CalledProcessError as e:
            self.logger.error(f"Failed to reset repository {repo_path}: {e.stderr}")
            return False
        except Exception as e:
            self.logger.error(f"Unexpected error resetting repository {repo_path}: {str(e)}")
            return False

    def set_work_dir(self):
        """
        Set up the working directory by cloning the repository and checking out the specified commit.
        """
        # 优先使用外部仓库
        if self.repo_dir:
            self.logger.info(f"Using external repository: {self.repo_dir}")
            self.work_dir = self.repo_dir
            self._is_external_repo = True  # 标记为外部仓库

            return
        
        # 优先尝试从 full_text_kv 中提取 URL（统一方式）
        git_url = None
        if hasattr(self, 'bad_job') and self.bad_job and self.bad_job.get('full_text_kv'):
            _, git_url = self._extract_github_repo(self.bad_job['full_text_kv'])
        
        # 如果 full_text_kv 方式失败，回退到原有方式
        if not git_url:
            if 'program.makepkg.commit' == self.commit_field:
                if '_url' in self.bad_job['program']['makepkg']:
                    git_url = self.bad_job['program']['makepkg']['_url']
            elif self.commit_field == 'ss.linux.commit':
                if '_url' in self.bad_job['ss']['linux']:
                    git_url = self.bad_job['ss']['linux']['_url']
            
            # 标准化URL格式（原有逻辑）- 只处理 git+https，不处理原生 git://
            if git_url:
                if git_url.startswith("git+https://"):
                    git_url = git_url.replace("git+https://", "https://", 1)
        
        # 验证 URL 存在性
        if not git_url:
            # 获取更具体的错误信息
            if self.commit_field == 'program.makepkg.commit':
                field_path = "program.makepkg._url or full_text_kv"
            else:
                field_path = "ss.linux._url or full_text_kv"
            
            self.logger.error(error_msg)
            raise ValueError(error_msg)
        
        self.logger.debug(f"Extracted URL: {git_url}")
        
        # 克隆新仓库
        self.work_dir = self.clone_repo(git_url)
        self._is_external_repo = False  # 标记为内部仓库
        self.logger.info(f"Cloned new repository to: {self.work_dir}")

    def set_log(self, result_root: str):
        """带验证的日志路径设置"""
        # 参数类型验证
        if not isinstance(result_root, (str, bytes, os.PathLike)):
            raise TypeError(
                f"result_root must be path-like object, got {type(result_root)}"
            )

        # 路径规范化
        result_root = os.path.abspath(str(result_root))

        # 目录存在性检查
        try:
            os.makedirs(result_root, mode=0o755, exist_ok=True)
            if not os.access(result_root, os.W_OK):
                raise PermissionError(f"Write permission denied: {result_root}")
        except OSError as e:
            raise RuntimeError(f"Failed to create result directory: {e}") from None

        # 初始化配置
        self.temp_result_root = result_root
        self.temp_git_base = os.path.join(result_root, 'git_repos')

        # 确保git目录存在
        os.makedirs(self.temp_git_base, exist_ok=True)

        # 注册清理函数，确保只注册一次
        if not BisectConfig.CLEANUP_REGISTERED:
            atexit.register(self.cleanup_temp_files)
            BisectConfig.CLEANUP_REGISTERED = True

        # 配置会话特定的日志
        self.logger = get_logger(session_id=self.session_id, log_dir=result_root)
        # 确保文件handler被正确配置
        self.logger.configure(log_dir=result_root)

        # 立即测试日志是否工作
        self.logger.info("TEST: Log initialization successful")

        # 记录会话开始信息
        task_info = getattr(self, 'task', {})
        self.logger.info("Bisect session started", task=task_info)
        self.logger.info("Using HTTP-based database client")

    def _detect_build_task(self, job_dict: dict) -> bool:
        """
        Detect if a job is a build/compile task based on its configuration.

        Build tasks are those that compile source code (kernel, packages, etc.)
        and may generate build artifacts. They are distinguished from test tasks
        (functional tests, performance benchmarks) which run pre-compiled code.

        Detection logic:
        1. suite == 'makepkg': Package build tasks (e.g., building user-space packages)
        2. Has 'ss.linux.commit' WITHOUT a test suite: Kernel build tasks
        3. Has 'ss.linux.commit' WITH a test suite: Kernel test tasks (NOT build)

        Args:
            job_dict: Job dictionary containing suite, ss, and program fields

        Returns:
            bool: True if this is a build task, False if it's a test task
                  (test tasks include both functional and performance tests)

        Note:
            This method does NOT distinguish between functional and performance tests.
            Both are considered non-build tasks (return False).
            The distinction between functional and performance is made at the bisect level:
            - Error bisect (self.error_id): Searching for error introduction
            - Performance bisect (self.metric): Searching for performance regression
        """
        # Build/compile tasks: suite == 'makepkg'
        suite = job_dict.get('suite')
        if suite == 'makepkg':
            self.logger.debug(f"Task detected as BUILD: suite=makepkg")
            return True

        # Check if this is a kernel-related job
        if 'ss' in job_dict and isinstance(job_dict['ss'], dict):
            linux_subsystem = job_dict['ss'].get('linux', {})
            if 'commit' in linux_subsystem:
                # If there's a specific test suite defined, this is a TEST task, not BUILD
                # Test suites include: boot, will-it-scale, fio, etc.
                # Build tasks typically don't have a suite field or have suite='kernel' or 'build'
                if suite and suite not in ['kernel', 'build', None]:
                    self.logger.debug(f"Task detected as TEST: has ss.linux.commit but suite={suite} (test suite)")
                    return False

                # If no suite or suite is 'kernel'/'build', this is a BUILD task
                self.logger.debug(f"Task detected as BUILD: has ss.linux.commit, suite={suite}")
                return True

        self.logger.debug(f"Task detected as TEST: suite={suite}, no build indicators")
        return False

    def set_bad_job(self) -> None:
        """
        Identifies the problematic commit associated with a failed build job

        Determines the commit reference based on build job type:
        1. For pkgbuild suite jobs: Uses upstream repository commit
        2. For standard system (SS) builds: Uses Linux kernel subsystem commit

        Initializes instance variables:
        - bad_commit: Commit ID to use as bad reference for bisect
        - commit_field: Path to commit field in job data (for diagnostics)
        """
        try:
            # Initialize failed job data structure
            self.bad_job = self.init_job_content(self.bad_job_id)
            # 保存bad_job的原始health状态，用于后续bisect状态判断
            self.bad_job_health = self.bad_job.get('job_health', 'unknown')
            self.bad_job.pop('job_health')
            self.logger.info(f"Bad job health status: {self.bad_job_health}")
        except ValueError as e:
            self.logger.error(f"Invalid job ID - original value: {self.bad_job_id}")
            raise RuntimeError(f"Task ID validation failed: {str(e)}") from None

        # Identify task type using unified detection method
        self.is_build_task = self._detect_build_task(self.bad_job)

        # Determine bisect type for better logging
        bisect_type_desc = 'performance' if self.metric else ('error' if self.error_id else 'unknown')
        task_type_desc = 'build/compile' if self.is_build_task else 'test (functional/performance)'

        self.logger.info(f"Task classification: {task_type_desc}, Bisect mode: {bisect_type_desc}")
        self.logger.info(f"Suite: {self.bad_job.get('suite', 'N/A')}, has_ss: {bool(self.bad_job.get('ss'))}")

        # Important: build_stage checking ONLY applies to build tasks
        if self.is_build_task:
            self.logger.info("Build_stage checking will be used for failure analysis")
        else:
            self.logger.info("Build_stage checking will be skipped (test task)")

        # Handle pkgbuild suite cases first
        if self.bad_job.get('suite') == 'makepkg' and self.bad_job['program']['makepkg'].get('commit'):
            # Use upstream repository commit (package builds)
            self.bad_commit = self.bad_job['program']['makepkg'].get('commit')
            self.commit_field = 'program.makepkg.commit'

        # Handle standard system builds
        elif 'ss' in self.bad_job and isinstance(self.bad_job['ss'], dict):
            # Extract Linux kernel subsystem commit
            linux_subsystem = self.bad_job['ss'].get('linux', {})
            if 'commit' in linux_subsystem:
                self.bad_commit = linux_subsystem['commit']
                self.commit_field = 'ss.linux.commit'
            else:
                raise ValueError("Missing Linux subsystem commit in SS build job")
            if self.metric:
                stats = self.bad_job.get("stats", {})
                try:
                    self.bad_metric_value = float(stats.get(self.metric, 0))
                    if not self.bad_metric_value:
                        self.logger.error(f"Bad metric {self.metric} not found in job stats")
                        raise ValueError(f"Invalid metric value for {self.metric}")
                except (TypeError, ValueError) as e:
                    self.logger.error(f"Invalid metric value: {e}")
                    raise ValueError(f"Invalid metric value for {self.metric}")
        else:
            raise RuntimeError(f"Unrecognized build job type: {self.bad_job.get('suite')}")


    def run_git_command(self, args, cwd=None):
        """Execute git command with forced English output"""
        env = os.environ.copy()
        env["LANG"] = "C"
        env["LC_ALL"] = "C"
        
        try:
            result = subprocess.run(
                ["git"] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                check=True
            )
            return result.stdout.strip() if result.stdout else ""
        except subprocess.CalledProcessError as e:
            self.logger.error(f"Git command failed: {e.stderr}")
            raise

    def clone_repo(self, repo_url: str, persistent=False) -> Optional[str]:
        """
        Unified clone method, supports persistent and temporary modes.

        Reuses an existing checkout when its remote URL matches (resetting
        and updating it via git.sh's git_clone_update); otherwise removes
        any stale directory and clones fresh, with retries.

        :param repo_url: Repository URL
        :param persistent: True=persistent mode (daily testing), False=temporary mode (bisect)
        :return: Repository path
        :raises CloneError: when cloning fails on every retry or the cloned
                repository fails its integrity check
        """
        # Determine base directory
        if persistent:
            # Use environment variable or default path
            base_dir = os.environ.get('REPO_BASE_DIR', os.path.expanduser("~/auto_test_repos"))
        else:
            base_dir = self.temp_git_base

        # Create base directory
        os.makedirs(base_dir, exist_ok=True)
        repo_name = os.path.basename(repo_url).replace('.git', '')
        repo_root = os.path.join(base_dir, repo_name)
        max_retries = BisectConfig.GIT_CLONE_RETRIES
        retry_delay = BisectConfig.GIT_CLONE_RETRY_DELAY
        git_sh_path = os.path.join(os.environ.get('LKP_SRC', ''), 'lib/git.sh')

        # Check if directory exists
        if os.path.exists(os.path.join(repo_root, '.git')):
            self.logger.info(f"Found existing repository: {repo_root}")
            try:
                # Validate remote URL
                remote_check = ['git', '-C', repo_root, 'config', '--get', 'remote.origin.url']
                existing_url = self.run_command_real_time(remote_check).strip()
                if existing_url != repo_url:
                    self.logger.warning(f"Repository URL mismatch, deleting and re-cloning.")
                    self.logger.info(f"Existing: {existing_url}")
                    self.logger.info(f"Expected: {repo_url}")
                    shutil.rmtree(repo_root)
                    # Continue to clone logic
                else:
                    # URL matches, use git.sh's git_clone_update to update the repo and switch to the latest HEAD of the default branch
                    self.logger.info("Updating existing repository...")
                    try:
                        # 0. Reset repository to clean state before updating
                        if not self._reset_repo_to_clean_state(repo_root):
                            self.logger.warning(f"Failed to reset existing repository, attempting to delete and re-clone")
                            shutil.rmtree(repo_root)
                            # Continue to clone logic below
                        else:
                            # 1. Fetch remote branch info first
                            fetch_cmd = ['git', '-C', repo_root, 'fetch', 'origin']
                            self.run_command_real_time(fetch_cmd)

                            # 2. Get remote default branch name
                            default_branch = 'master'  # Default value
                            try:
                                # Try to get remote default branch
                                default_branch_cmd = ['git', '-C', repo_root, 'symbolic-ref', 'refs/remotes/origin/HEAD']
                                result = subprocess.run(default_branch_cmd, capture_output=True, text=True, check=True)
                                default_branch = result.stdout.strip().replace('refs/remotes/origin/', '')
                            except subprocess.CalledProcessError:
                                # If it fails, try common default branch names
                                for branch in ['main', 'master']:
                                    check_cmd = ['git', '-C', repo_root, 'rev-parse', '--verify', f'origin/{branch}']
                                    if subprocess.run(check_cmd, capture_output=True, stderr=subprocess.DEVNULL).returncode == 0:
                                        default_branch = branch
                                        break

                            # 3. Use git.sh's git_clone_update, specifying the correct branch
                            update_cmd = [
                                'bash', '-c',
                                f'source {git_sh_path} && cd "{os.path.dirname(repo_root)}" && git_clone_update "{repo_url}" "{os.path.basename(repo_root)}" --branch {default_branch}'
                            ]
                            self.run_command_real_time(update_cmd)
                            self.logger.info(f"Repository updated and switched to the latest {default_branch} branch successfully")

                            # Validate repository status (works for both bare and non-bare).
                            # check=True raises CalledProcessError on failure, which the
                            # handler below answers by deleting and re-cloning — the old
                            # `if result.returncode == 0` else-branch was unreachable.
                            validate_cmd = ["git", "-C", repo_root, "rev-parse", "--git-dir"]
                            subprocess.run(validate_cmd, capture_output=True, text=True, check=True)
                            self.logger.info(f"Using updated existing repository: {repo_root}")
                            return repo_root

                    except subprocess.CalledProcessError as e:
                        self.logger.warning(f"Failed to update repository: {e.stderr if hasattr(e, 'stderr') else str(e)}")
                        self.logger.info("Deleting existing repository and re-cloning...")
                        shutil.rmtree(repo_root)
                        # Continue to clone logic

            except subprocess.CalledProcessError as e:
                self.logger.error(f"Existing repository operation failed: {e.stderr if hasattr(e, 'stderr') else str(e)}")
                self.logger.info("Deleting problematic repository and re-cloning...")
                shutil.rmtree(repo_root)

        # Remove any leftover directory before cloning.
        # BUGFIX: the old `if os.path.exists(...) and shutil.rmtree(...)` form
        # never logged — rmtree() returns None, so the condition was always
        # falsy after the deletion had already happened.
        if os.path.exists(repo_root):
            shutil.rmtree(repo_root)
            self.logger.info(f"remove {repo_root}")

        for attempt in range(max_retries):
            try:
                self.logger.info(f"Cloning repository {repo_url} to {repo_root} (attempt {attempt+1}/{max_retries})")

                # Clone with git.sh
                clone_cmd = [
                    'bash', '-c',
                    f'source {git_sh_path} && git clone {repo_url} {repo_root}'
                ]
                self.run_command_real_time(clone_cmd)

                # Validate repository integrity
                try:
                    # Check if it's a valid git repository (works for both bare and non-bare)
                    validate_cmd = ["git", "-C", repo_root, "rev-parse", "--git-dir"]
                    result = subprocess.run(validate_cmd, capture_output=True, text=True, check=True)

                    # Verify commit history is accessible
                    log_cmd = ["git", "-C", repo_root, "log", "-1", "--oneline"]
                    subprocess.run(log_cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

                except subprocess.CalledProcessError as e:
                    self.logger.error(f"Repository integrity check failed: {e.stderr}")
                    shutil.rmtree(repo_root, ignore_errors=True)
                    raise CloneError(repo_url, max_retries) from e

                return repo_root

            except subprocess.CalledProcessError as e:
                self.logger.error(f"Git operation failed: {e.stderr if hasattr(e, 'stderr') else str(e)}")
                if os.path.exists(repo_root):
                    shutil.rmtree(repo_root, ignore_errors=True)
                if attempt == max_retries - 1:
                    raise CloneError(repo_url, max_retries) from e
                time.sleep(retry_delay * (attempt+1))

            except Exception as e:
                self.logger.error(f"Unexpected error: {e}")
                if os.path.exists(repo_root):
                    shutil.rmtree(repo_root, ignore_errors=True)
                raise


    def find_good_commit(self):
        """
        Locate a known-good commit to serve as the bisect starting point.

        Strategy:
        - Performance bisect: sample historical commits and return one with
          a clear performance gap versus the bad commit.
        - Error bisect (or when performance sampling fails):
          1. recursive RC-tag search,
          2. stable-tag lookup via find_good_commit_by_tag,
          3. time-range search via find_good_commit_by_job, anchored on the
             last RC tag that failed, when one is known.

        :return: Good commit hash if found, otherwise None
        """
        # Record the bisect start timestamp exactly once
        if self.bisect_start_time is None:
            self.bisect_start_time = int(time.time())
            self.logger.info(f"Bisect开始时间记录: {self.bisect_start_time}")

        # Performance bisect: try the sampling-based search first
        if self.metric:
            self.logger.info("Performance bisect: searching for commit with significant performance difference...")
            good_commit = self.find_good_commit_by_performance_sampling()
            if good_commit:
                self.logger.info(f"Found good commit by performance sampling: {good_commit}")
                return good_commit
            self.logger.warning("Performance sampling failed, falling back to standard method")

        # 1. Recursive RC-tag search (also remembers the last tag it tried)
        self.logger.info("尝试通过递归RC标签查找...")
        good_commit, last_failed_tag = self.find_good_commit_by_recursive_rc()
        if good_commit:
            self.logger.info(f"递归RC标签中找到好提交: {good_commit}")
            return good_commit

        # 2. Stable-tag lookup (test verification happens inside)
        self.logger.info("尝试通过标签查找稳定提交...")
        good_commit = self.find_good_commit_by_tag()
        if good_commit:
            self.logger.info(f"标签中找到经过测试的好提交: {good_commit}")
            return good_commit

        # 3. Time-range search, anchored on the last failed tag when available
        if last_failed_tag:
            self.logger.info(f"Searching for good commit by time range from tag {last_failed_tag}...")
            good_commit = self.find_good_commit_by_job(start_commit=last_failed_tag)
        else:
            self.logger.info("Tag search failed, searching for known good commits by time range...")
            good_commit = self.find_good_commit_by_job()

        if good_commit:
            self.logger.info(f"Found good commit in time range: {good_commit}")
            return good_commit

        # Every strategy came up empty
        return None

    def find_good_commit_by_performance_sampling(self, day_agos=(90, 180, 365)) -> Optional[str]:
        """
        Automatically find a historical commit whose performance clearly
        differs from bad_commit.

        Workflow:
        1. Sample historical time points for candidate commits.
        2. For each ancestor candidate, collect performance samples.
        3. Use check_performance_difference to verify a clear gap; on
           success that call sets self.mid_point and self.direction.
        4. Return the first candidate with a clear gap.

        Args:
            day_agos: historical probe points in days.
                BUGFIX: was a mutable list default ([90, 180, 365]); now an
                immutable tuple so the default can never be mutated or
                shared across calls.

        Returns:
            A good commit with a clear performance gap, or None if no
            candidate qualifies.
        """
        self.logger.info("Starting performance-based good commit search...")
        self.logger.info(f"Bad commit: {self.bad_commit[:8]}, metric: {self.metric}")

        # Gather candidate historical commits
        commits = self.get_test_commits(self.work_dir, self.bad_commit, day_agos)
        if not commits:
            self.logger.warning("No historical commits found for performance sampling")
            return None

        self.logger.info(f"Found {len(commits)} candidate commits: {[c[:8] for c in commits]}")

        # Try each candidate in turn
        for i, candidate_commit in enumerate(commits):
            self.logger.info(f"Testing candidate {i+1}/{len(commits)}: {candidate_commit[:8]}")

            # Only ancestors of the bad commit are usable bisect endpoints
            if not self._is_ancestor(candidate_commit, self.bad_commit):
                self.logger.warning(f"Skipping {candidate_commit[:8]}: not an ancestor of bad_commit")
                continue

            try:
                # check_performance_difference sets self.mid_point and
                # self.direction when it detects a clear gap
                auto_calc_direction = self.direction is None
                has_gap = self.check_performance_difference(
                    candidate_commit,  # v1 (earlier)
                    self.bad_commit,   # v2 (later)
                    self.metric,
                    sample_count=3,
                    auto_calc_direction=auto_calc_direction
                )

                if has_gap:
                    self.logger.info(f"✓ Found good commit with clear performance gap: {candidate_commit[:8]}")
                    self.logger.info(f"  Performance threshold: mid_point={self.mid_point:.2f}, direction={self.direction}")
                    return candidate_commit
                else:
                    self.logger.info(f"✗ No clear gap between {candidate_commit[:8]} and {self.bad_commit[:8]}")

            except Exception as e:
                # Best-effort: a failed check on one candidate must not
                # abort the whole search
                self.logger.warning(f"Failed to check performance for {candidate_commit[:8]}: {e}")
                continue

        self.logger.warning("Performance sampling did not find a good commit with clear performance gap")
        return None

    def find_good_commit_by_recursive_rc(self, max_attempts=10):
        """
        Search for a good commit by walking back through RC tags, doubling
        the number of tags skipped after every failed attempt.

        :param max_attempts: Maximum number of tags to test.
        :return: (good_commit, last_tested_tag) — the good commit (or None)
                 and the last tag that was actually tested (or None).
        """
        try:
            script = os.path.join(os.environ.get('LKP_SRC', ''), 'lib/git-base-rc-tag.sh')
            if not os.path.exists(script):
                self.logger.info("git-base-rc-tag.sh not found, skipping recursive RC search.")
                return None, None

            seen_tags = set()
            baseline = self.bad_commit
            last_tag = None
            step = 1

            for attempt in range(max_attempts):
                self.logger.info(f"Attempt {attempt + 1}: Looking for an older RC tag, jumping back {step} versions.")

                # Walk `step` tags backwards from the current baseline
                candidate = baseline
                for _ in range(step):
                    try:
                        proc = subprocess.run(
                            ['bash', script, '-C', self.work_dir, candidate],
                            capture_output=True, text=True, check=True, timeout=300
                        )
                    except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
                        self.logger.warning(f"Failed to find previous tag for {candidate}: {e}")
                        candidate = None
                        break
                    older = proc.stdout.strip()
                    if not older:
                        self.logger.info(f"Could not jump back further from {candidate}.")
                        candidate = None
                        break
                    candidate = older

                if not candidate or candidate in seen_tags:
                    self.logger.info("No new RC tag found or already tested, stopping search.")
                    break

                seen_tags.add(candidate)
                last_tag = candidate
                self.logger.info(f"Found candidate RC tag: {candidate}")

                if not self._is_ancestor(candidate, self.bad_commit):
                    self.logger.warning(f"Tag {candidate} is not an ancestor of the bad commit, skipping.")
                    baseline = candidate
                    step *= 2  # keep widening the jump even on a skip
                    continue

                status = self.get_commit_status_by_job(candidate)
                self.logger.info(f"Tag {candidate} status: {status}")
                if status == 'good':
                    self.logger.info(f"Found good RC tag: {candidate}")
                    return candidate, candidate

                # bad or skip: make this tag the new baseline and jump further
                baseline = candidate
                step *= 2

            self.logger.info(f"Recursive RC search finished after testing {len(seen_tags)} tags.")
            return None, last_tag

        except Exception as e:
            self.logger.error(f"Recursive RC tag search failed: {str(e)}")
            return None, None

    def find_good_commit_by_db(self) -> Optional[str]:
        """
        Look up a known-good commit in the database that shares the bad
        job's configuration but carries a different, earlier commit.

        A candidate qualifies when it:
        1. matches the bad job's repository URL and project name,
        2. is not the bad commit itself,
        3. does not show the same error (error bisect) / is healthy
           (performance bisect),
        4. is an ancestor of the bad commit.

        :return: the best good commit hash, or None when nothing qualifies
        """
        try:
            # Identify the repository from the bad job's config section
            if self.commit_field == 'program.makepkg.commit':
                section = self.bad_job['program']['makepkg']
            else:
                section = self.bad_job['ss']['linux']
            repo_url = section.get('_url')
            project = section.get('project')

            if not repo_url or not project:
                self.logger.warning("缺少仓库URL或项目名称，跳过数据库查询")
                return None

            # Build a query template without the commit field.
            # NOTE: all_params_md5 must NOT be recomputed here — the MD5
            # stored in the database includes the commit, so matching is
            # done on fields rather than on MD5.
            template = copy.deepcopy(self.bad_job)  # deep copy: never mutate the original job
            if self.commit_field == 'program.makepkg.commit':
                template['program']['makepkg'].pop('commit', None)
            else:
                template['ss']['linux'].pop('commit', None)

            # Field-based match (not MD5, see note above)
            candidates = self.bisect_db.check_existing_job(
                template,
                limit=20,
                time_range_days=BisectConfig.JOB_REUSE_TIME_WINDOW_DAYS
            )
            if not candidates:
                self.logger.info("数据库中没有找到相似配置的作业")
                return None

            # Filter candidates down to usable good commits
            usable = []
            for job_id, _ in candidates:
                info = self.bisect_db.get_job_info(job_id)
                if not info:
                    continue

                commit = self._get_commit_from_job(info)
                if not commit or commit == self.bad_commit:
                    continue

                # Reject candidates showing the same failure / bad health
                if self.error_id:
                    # Same fuzzy-matching logic as elsewhere
                    if self._check_error_id(info.get('stats', {}), self.error_id) == 'bad':
                        continue
                elif self.metric:
                    if info.get('job_health') != 'success':
                        continue

                # Only ancestors of the bad commit are valid endpoints
                if self._is_ancestor(commit, self.bad_commit):
                    self.logger.debug(f"找到候选好提交: {commit[:7]}")
                    usable.append(commit)

            if not usable:
                self.logger.info("没有找到符合条件的有效提交")
                return None

            # Pick the candidate nearest to the bad commit
            best_commit = self._find_nearest_commit(usable)
            self.logger.info(f"找到最佳好提交: {best_commit[:7]}")
            return best_commit

        except Exception as e:
            self.logger.error(f"数据库查询失败: {str(e)}")
            return None

    def find_good_commit_by_tag(self) -> Optional[str]:
        """
        Resolve a stable tag and verify it through a test job.

        1. Prefer git-base-rc-tag.sh for a stable base tag.
        2. Fall back to `git describe --tags --abbrev=0` for the most
           recent tag.
        3. Verify the tag's status via get_commit_status_by_job (submits a
           test job when no finished one exists).

        :return: the verified good tag, or None
        """
        try:
            tag = None

            # 1. Preferred path: git-base-rc-tag.sh
            script = os.path.join(os.environ.get('LKP_SRC', ''), 'lib/git-base-rc-tag.sh')
            if os.path.exists(script):
                self.logger.info("使用 git-base-rc-tag.sh 查找稳定标签...")
                cmd = ['bash', script, '-C', self.work_dir, self.bad_commit]
                try:
                    proc = subprocess.run(
                        cmd,
                        capture_output=True,
                        text=True,
                        check=True,
                        cwd=self.work_dir
                    )
                    tag = proc.stdout.strip() if proc.stdout else None
                    if tag:
                        self.logger.info(f"git-base-rc-tag.sh 找到标签: {tag}")
                    else:
                        self.logger.info("git-base-rc-tag.sh 未找到合适标签")
                except subprocess.CalledProcessError as e:
                    self.logger.warning(f"git-base-rc-tag.sh 执行失败: {e}")
                    self.logger.warning(f"命令: {' '.join(cmd)}")
                    self.logger.warning(f"返回码: {e.returncode}")
                    if e.stdout:
                        self.logger.warning(f"标准输出: {e.stdout}")
                    if e.stderr:
                        self.logger.warning(f"标准错误: {e.stderr}")
                    tag = None
            else:
                self.logger.info("git-base-rc-tag.sh 不存在")

            # 2. Fallback: the most recent reachable tag
            if not tag:
                tag = self.run_git_command(["describe", "--tags", "--abbrev=0"], cwd=self.work_dir)
                if not tag or tag.strip() == "":
                    self.logger.info("No tags found in repository")
                    return None
                self.logger.info(f"git describe 找到标签: {tag}")

            # 3. Verify the tag's status through a test job
            tag_status = self.get_commit_status_by_job(tag)
            if tag_status == 'good':
                self.logger.info(f"标签 {tag} 状态为 'good'")
                return tag
            self.logger.info(f"标签 {tag} 状态为: {tag_status}，不是好提交")
            return None

        except Exception as e:
            self.logger.error(f"标签查询失败: {str(e)}")
            return None

    def _is_ancestor(self, candidate: str, target: str) -> bool:
        """Return True when `candidate` is an ancestor of `target`."""
        try:
            # merge-base --is-ancestor exits non-zero when the relation does
            # not hold, which run_git_command surfaces as CalledProcessError
            self.run_git_command(["merge-base", "--is-ancestor", candidate, target], cwd=self.work_dir)
        except subprocess.CalledProcessError as e:
            self.logger.error(f"Error checking ancestor relationship: {e.stderr}")
            return False
        return True

    def _find_nearest_commit(self, commits: list) -> str:
        """查找最接近坏提交的祖先提交"""
        # 如果只有一个候选提交，直接返回
        if len(commits) == 1:
            return commits[0]
            
        # 获取坏提交的所有祖先
        cmd = f"git -C {self.work_dir} rev-list {self.bad_commit} --first-parent"
        try:
            ancestors = subprocess.check_output(cmd, shell=True, text=True).splitlines()
        except subprocess.CalledProcessError:
            self.logger.warning("无法获取提交祖先列表，返回第一个候选提交")
            return commits[0]
        
        # 按拓扑顺序排序候选提交
        ordered_commits = []
        for commit in ancestors:
            if commit in commits:
                ordered_commits.append(commit)
        
        # 返回最接近坏提交的祖先（列表最后一个）
        return ordered_commits[-1] if ordered_commits else commits[0]

    def find_good_commit_by_job(self, start_commit=None):
        """
        Probe historical commits by submitting test jobs until one of them
        reports a 'good' status.

        :param start_commit: commit to anchor the time-range search on;
                             defaults to the bad commit when None
        :return: Good commit hash if found, otherwise None
        """
        # Anchor the search on the given commit, or on the bad commit
        search_base = start_commit or self.bad_commit
        self.logger.info(f"从提交 {search_base} 开始时间范围查找")

        # Probe points: 3 months, half a year, one year, three years back
        day_agos = [90, 180, 365, 1095]

        # Collect candidate commits at those historical distances
        commits = self.get_test_commits(self.work_dir, search_base, day_agos)
        self.logger.info(f"Commits: {commits}")

        # Test candidates until one turns out good
        for commit in commits:
            commit_status = self.get_commit_status_by_job(commit)
            self.logger.info(f"Commit status: {commit_status}")
            if commit_status == 'good':
                return commit
            # 'bad' and 'skip' candidates are simply passed over

        return None

    def get_test_commits(self, work_dir, commit, day_agos):
        """
        Collect candidate commits at several historical distances.

        :param work_dir: The working directory of the repository
        :param commit: The starting commit hash
        :param day_agos: day offsets (relative to `commit`) to probe
        :return: list of unique commit hashes (possibly empty)
        """
        found = set()

        # One probe per requested historical distance
        for offset in day_agos:
            probe = self.get_day_ago_commit(work_dir, commit, offset)
            if probe:
                self.logger.info(f"Temp commit: {probe}")
                found.add(probe)

        # Fall back to the immediate predecessor when no dated probe hit
        if not found:
            fallback = self.get_last_commit(work_dir, commit)
            if fallback:
                found.add(fallback)

        # Drop falsy leftovers and hand back a list
        return [c for c in found if c]

    def get_day_ago_commit(self, work_dir, commit, day_ago):
        """
        Get the newest first-parent commit at least `day_ago` days older
        than the specified commit.

        :param work_dir: The working directory of the repository
        :param commit: The starting commit hash
        :param day_ago: The number of days ago to find the commit
        :return: The commit hash, or None on failure / when none exists
        """
        from datetime import timedelta

        def _second_line(args):
            # `git rev-list --pretty=format:...` prints "commit <sha>" first
            # and the formatted payload second (the old shell pipeline used
            # `| sed -n 2p` for the same extraction).
            out = subprocess.run(
                ["git", "-C", work_dir] + args,
                capture_output=True, text=True, check=True
            ).stdout.splitlines()
            return out[1].strip() if len(out) > 1 else None

        try:
            # Commit date of the starting commit (YYYY-MM-DD)
            date = _second_line(["rev-list", "--first-parent",
                                 "--pretty=format:%cd", "--date=short", commit, "-1"])
            if not date:
                return None

            # Compute the cutoff in Python instead of shelling out to GNU
            # `date` — portable, and immune to shell quoting issues.
            before = (datetime.strptime(date, "%Y-%m-%d") - timedelta(days=day_ago)).strftime("%Y-%m-%d")

            # Newest first-parent commit no later than the cutoff
            return _second_line(["rev-list", f"--before={before}",
                                 "--pretty=format:%H", "--first-parent", commit, "-1"])
        except (subprocess.CalledProcessError, ValueError) as e:
            # BUGFIX: subprocess.getoutput() never raises, so the old except
            # was dead and git error text leaked back to callers as the
            # "commit"; failures now yield None as intended.
            self.logger.error(f"Error getting day ago commit: {e}")
            return None

    def get_last_commit(self, work_dir, commit):
        """
        Get the last (first-parent) commit before the specified commit.

        :param work_dir: The working directory of the repository
        :param commit: The starting commit hash
        :return: The preceding commit hash, or None on failure / at a root
                 commit with no parent
        """
        cmd = ["git", "-C", work_dir, "rev-list", "--first-parent", commit, "-2"]
        try:
            lines = subprocess.run(
                cmd, capture_output=True, text=True, check=True
            ).stdout.splitlines()
        except subprocess.CalledProcessError as e:
            # BUGFIX: subprocess.getoutput() never raises, so the old except
            # was dead and git error text leaked back to callers as the
            # "commit"; failures now yield None as intended.
            self.logger.error(f"Error getting last commit: {e}")
            return None
        # rev-list prints the commit itself first and its parent second
        # (the old pipeline extracted it with `| sed -n 2p`)
        return lines[1].strip() if len(lines) > 1 else None

    def get_commit_status_by_job(self, commit, use_performance_check=False):
        """
        Get the status of a commit by submitting a job built from the bad
        job's config with only the commit field swapped out.

        NOTE: this method is used while searching for a good_commit.
        - Error bisect: classify via error_id matching on the job stats.
        - Performance bisect:
          - use_performance_check=True and mid_point/direction set: compare
            the metric value against the mid_point threshold.
          - otherwise: fall back to job_health (functional check only).

        :param commit: The commit hash to check the status for
        :param use_performance_check: apply the performance-threshold check
               (requires self.mid_point and self.direction to be set)
        :return: The status of the commit ('good', 'bad', 'skip')
        """
        # Work on a copy so the original bad job is never mutated
        job_copy = copy.deepcopy(self.bad_job)

        # Update the job copy's upstream commit to the specified commit
        # IMPORTANT: Need to update BOTH 'program'/'ss' AND 'pp' structures
        # because MD5 calculation uses the flattened 'pp' structure
        if self.commit_field == "program.makepkg.commit":
            if 'program' not in job_copy:
                self.logger.error(f"job_copy has no 'program' key! Keys: {list(job_copy.keys())}")
            elif 'makepkg' not in job_copy.get('program', {}):
                self.logger.error(f"job_copy['program'] has no 'makepkg' key! Keys: {list(job_copy.get('program', {}).keys())}")
            else:
                job_copy['program']['makepkg']['commit'] = commit

                # CRITICAL: Also update pp structure (used for MD5 calculation)
                # pp is a filtered subset of program fields, must update both
                if 'pp' in job_copy and 'makepkg' in job_copy['pp']:
                    job_copy['pp']['makepkg']['commit'] = commit
                else:
                    self.logger.warning(f"pp.makepkg not found in job_copy, MD5 may be incorrect!")

                # Invalidate cached MD5 so it gets recalculated with new commit
                if 'all_params_md5' in job_copy:
                    del job_copy['all_params_md5']
        else:
            if 'ss' not in job_copy:
                self.logger.error(f"job_copy has no 'ss' key! Keys: {list(job_copy.keys())}")
            elif 'linux' not in job_copy.get('ss', {}):
                self.logger.error(f"job_copy['ss'] has no 'linux' key! Keys: {list(job_copy.get('ss', {}).keys())}")
            else:
                job_copy['ss']['linux']['commit'] = commit

                # For ss jobs, the MD5 calculation uses job['ss'] directly (no separate pp field)
                # But we still need to invalidate the cached MD5
                if 'all_params_md5' in job_copy:
                    del job_copy['all_params_md5']

        # Submit job and poll for results
        try:
            job_id, result_root = self.submit_job(job_copy)
            job_stats, job_health = self._poll_job_stats(job_id, result_root)

            # Extract metric value if this is a performance bisect
            metric_value = None
            if self.metric and self.metric in job_stats:
                try:
                    metric_value = float(job_stats[self.metric])
                except (ValueError, TypeError):
                    # Non-numeric metric value: keep metric_value = None
                    pass

            # Record the job with performance value if available
            self.record_jobs((job_id, result_root), commit, job_health, metric_value)

            # Classification logic
            if self.metric:
                # Performance bisect
                if use_performance_check and self.mid_point is not None and self.direction is not None:
                    # Judge against the performance threshold: the sign of
                    # (value - mid_point) * direction decides good vs bad
                    if metric_value is not None:
                        if (metric_value - self.mid_point) * self.direction > 0:
                            self.logger.info(f"Commit {commit[:8]} is good (performance: {metric_value:.2f})")
                            return 'good'
                        else:
                            self.logger.info(f"Commit {commit[:8]} is bad (performance: {metric_value:.2f})")
                            return 'bad'
                    else:
                        self.logger.warning(f"Metric {self.metric} not found in job stats")
                        return 'skip'
                else:
                    # Fall back to job_health (functional issues only)
                    if job_health == 'success':
                        self.logger.info(f"Commit {commit[:8]} is good (job_health=success)")
                        return 'good'
                    else:
                        self.logger.info(f"Commit {commit[:8]} is bad (job_health={job_health})")
                        return 'bad'
            else:
                # Error bisect: check whether the stats contain the given error_id
                status = self._check_error_id(job_stats, self.error_id, job_health, result_root)
                self.logger.info(f"Commit {commit[:8]} status: {status}")
                return status

        except Exception as e:
            # Any submission/polling failure makes this commit untestable
            self.logger.error(f"Error checking commit {commit[:8]} status: {e}")
            return 'skip'

    def check_performance_difference(self, v1_commit: str, v2_commit: str, metric: str,
                                    sample_count: int = 3, auto_calc_direction: bool = True) -> bool:
        """
        检查两个内核版本是否有可bisect的性能变化

        Args:
            v1_commit: 起始提交（时间上更早）
            v2_commit: 结束提交（时间上更晚）
            metric: 性能指标名称
            sample_count: 每个版本的采样数量
            auto_calc_direction: 是否自动计算direction（False时使用self.direction）

        Returns:
            bool: 如果有明显的性能差异且没有重叠，返回True，同时设置 self.mid_point 和 self.direction
        """
        self.logger.info(f"Checking performance difference between {v1_commit[:8]}(start) and {v2_commit[:8]}(end)")

        # 为v1(start_commit)收集样本
        self.v1_samples = []
        self.logger.info(f"Collecting {sample_count} samples for start commit")
        job_copy = copy.deepcopy(self.bad_job)
        if 'ss' in job_copy and 'linux' in job_copy['ss']:
            job_copy['ss']['linux']['commit'] = v1_commit
        else:
            job_copy['program']['makepkg']['commit'] = v1_commit

        # 查找已完成的job，获取多个样本
        completed_jobs = self.check_existing_completed_jobs(job_copy, limit=sample_count)

        # 从已完成的job中提取样本
        for job_id, _ in completed_jobs:
            if len(self.v1_samples) >= sample_count:
                break
            job_info = self.bisect_db.get_job_info(job_id)
            if job_info and 'stats' in job_info and metric in job_info['stats']:
                try:
                    value = float(job_info['stats'][metric])
                    self.v1_samples.append(value)
                    self.logger.info(f"Start commit sample {len(self.v1_samples)}: {value} (from existing job {job_id})")
                except (ValueError, TypeError):
                    self.logger.warning(f"Invalid metric value in job {job_id}")

        # 如果已有样本不足，提交新job补足
        while len(self.v1_samples) < sample_count:
            i = len(self.v1_samples)
            self.logger.info(f"Submitting new job for start commit sample {i+1}/{sample_count}")
            job_id, result_root = self.submit_job(job_copy, force=True)  # 强制提交新job
            job_stats, _ = self._poll_job_stats(job_id, result_root)

            if job_stats and metric in job_stats:
                try:
                    value = float(job_stats[metric])
                    self.v1_samples.append(value)
                    self.logger.info(f"Start commit sample {i+1}: {value} (from new job {job_id})")
                except (ValueError, TypeError):
                    self.logger.warning(f"Invalid metric value in new job {job_id}")
                    break  # 如果新job也失败，停止尝试

        # 为v2(end_commit)收集样本
        self.v2_samples = []
        self.logger.info(f"Collecting {sample_count} samples for end commit")
        job_copy = copy.deepcopy(self.bad_job)
        if 'ss' in job_copy and 'linux' in job_copy['ss']:
            job_copy['ss']['linux']['commit'] = v2_commit
        else:
            job_copy['program']['makepkg']['commit'] = v2_commit

        # 查找已完成的job，获取多个样本
        completed_jobs = self.check_existing_completed_jobs(job_copy, limit=sample_count)

        # 从已完成的job中提取样本
        for job_id, _ in completed_jobs:
            if len(self.v2_samples) >= sample_count:
                break
            job_info = self.bisect_db.get_job_info(job_id)
            if job_info and 'stats' in job_info and metric in job_info['stats']:
                try:
                    value = float(job_info['stats'][metric])
                    self.v2_samples.append(value)
                    self.logger.info(f"End commit sample {len(self.v2_samples)}: {value} (from existing job {job_id})")
                except (ValueError, TypeError):
                    self.logger.warning(f"Invalid metric value in job {job_id}")

        # 如果已有样本不足，提交新job补足
        while len(self.v2_samples) < sample_count:
            i = len(self.v2_samples)
            self.logger.info(f"Submitting new job for end commit sample {i+1}/{sample_count}")
            job_id, result_root = self.submit_job(job_copy, force=True)  # 强制提交新job
            job_stats, _ = self._poll_job_stats(job_id, result_root)

            if job_stats and metric in job_stats:
                try:
                    value = float(job_stats[metric])
                    self.v2_samples.append(value)
                    self.logger.info(f"End commit sample {i+1}: {value} (from new job {job_id})")
                except (ValueError, TypeError):
                    self.logger.warning(f"Invalid metric value in new job {job_id}")
                    break  # 如果新job也失败，停止尝试

        # 检查样本数量
        if len(self.v1_samples) < 2 or len(self.v2_samples) < 2:
            self.logger.error(f"Insufficient samples: start={len(self.v1_samples)}, end={len(self.v2_samples)}")
            return False

        # 计算范围和平均值
        v1_min, v1_max = min(self.v1_samples), max(self.v1_samples)
        v2_min, v2_max = min(self.v2_samples), max(self.v2_samples)
        v1_avg = sum(self.v1_samples) / len(self.v1_samples)
        v2_avg = sum(self.v2_samples) / len(self.v2_samples)

        self.logger.info(f"Start commit range: [{v1_min:.2f}, {v1_max:.2f}], avg: {v1_avg:.2f}")
        self.logger.info(f"End commit range: [{v2_min:.2f}, {v2_max:.2f}], avg: {v2_avg:.2f}")

        # 检查是否有间隙且不相交
        has_gap = (v1_max < v2_min) or (v2_max < v1_min)

        if has_gap:
            # 计算 mid_point（两个范围之间的中点）
            if v1_max < v2_min:
                self.mid_point = (v1_max + v2_min) / 2
            else:
                self.mid_point = (v2_max + v1_min) / 2

            # 根据参数决定是否自动计算 direction
            if auto_calc_direction:
                # 根据平均值计算 direction
                # v1(start) > v2(end) → 数值下降 → direction = 1 (大值=初始状态)
                # v1(start) < v2(end) → 数值上升 → direction = -1 (小值=初始状态)
                if v1_avg > v2_avg:
                    self.direction = 1
                    change_type = "decreased"
                    self.logger.info(f"Auto-calculated: start_avg({v1_avg:.2f}) > end_avg({v2_avg:.2f}), metric {change_type}")
                    self.logger.info(f"Direction=1: larger values indicate earlier state (before change)")
                else:
                    self.direction = -1
                    change_type = "increased"
                    self.logger.info(f"Auto-calculated: start_avg({v1_avg:.2f}) < end_avg({v2_avg:.2f}), metric {change_type}")
                    self.logger.info(f"Direction=-1: smaller values indicate earlier state (before change)")
            else:
                self.logger.info(f"Using preset direction: {self.direction}")

            self.logger.info(f"Mid-point (change detection threshold): {self.mid_point:.2f}")
            self.logger.info(f"Samples from start commit: {[f'{s:.2f}' for s in self.v1_samples]}")
            self.logger.info(f"Samples from end commit: {[f'{s:.2f}' for s in self.v2_samples]}")
            return True
        else:
            self.logger.info("No clear performance gap detected - ranges overlap, cannot bisect")
            return False

    def run_command_real_time(self, command):
        """
        Run *command*, mirroring its stdout/stderr to ours in real time.

        :param command: argv list passed to subprocess.Popen (shell=False)
        :return: combined "stdout + '\\n' + stderr" text, stripped
        """
        stdout_log = []
        stderr_log = []

        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,  # capture stderr as well
            text=True,
            bufsize=1,
            cwd=BisectConfig.SCRIPT_DIR,
            universal_newlines=True
        )

        # BUGFIX: the previous implementation called readline() on stdout and
        # then stderr in turn. readline() blocks, so a child filling one pipe
        # while we were blocked on the other could deadlock; it also used a
        # bare `except:` and never reaped the child. Multiplex both pipes with
        # select() until each reaches EOF instead.
        streams = {
            process.stdout: (sys.stdout, stdout_log),
            process.stderr: (sys.stderr, stderr_log),
        }
        pending = [process.stdout, process.stderr]
        while pending:
            readable, _, _ = select.select(pending, [], [])
            for stream in readable:
                line = stream.readline()
                if line:
                    mirror, log = streams[stream]
                    mirror.write(line)
                    mirror.flush()
                    log.append(line)
                else:
                    # EOF on this pipe; stop polling it.
                    pending.remove(stream)
        process.wait()  # reap the child to avoid a zombie

        full_stdout = "".join(stdout_log)
        full_stderr = "".join(stderr_log)

        # Combine stdout and stderr for downstream parsing.
        full_output = full_stdout + "\n" + full_stderr

        # Detailed logs for debugging.
        self.logger.debug(f"Command stdout: {full_stdout}")
        self.logger.debug(f"Command stderr: {full_stderr}")

        return full_output.strip()

    def run_bisect(self):
        """
        Run the Git bisect process to identify the problematic commit.

        Starts `git bisect start --no-checkout` over good_commit..bad_commit,
        then drives `git bisect run` with either the performance midpoint
        script (when self.metric is set) or the error-id run script.

        :return: combined stdout/stderr text of the bisect run
        :raises ValueError: performance bisect requested but self.mid_point
            or self.direction is unset
        :raises RuntimeError: bisect failed to start, returned empty output,
            or the subprocess failed
        """

        # Quote the error id so it survives as a single argv element of the run script.
        error_id = '"{}"'.format(self.error_id)
        # Print initial information
        self.logger.info(f"Work directory: {self.work_dir}")
        self.logger.info(f"Bad commit: {self.bad_commit}")
        self.logger.info(f"Good commit: {self.good_commit}")

        # Verify commit order: the good commit must be an ancestor of the bad commit.
        # NOTE(review): this block currently only logs the bisect mode; no
        # actual ancestry check is performed here — confirm whether one is intended.
        try:
            # Print different information depending on the bisect type
            if self.metric:
                self.logger.info("Bisect mode: Looking for performance regression commit")
                self.logger.info(f"Metric: {self.metric}")
            elif self.error_id:
                self.logger.info("Bisect mode: Looking for first bad commit (error bisect)")
                self.logger.info(f"Error ID: {self.error_id}")
            else:
                self.logger.info("Bisect mode: Looking for first bad commit")

            self.logger.info(f"Good commit (ancestor): {self.good_commit}")
            self.logger.info(f"Bad commit (descendant): {self.bad_commit}")

        except Exception as e:
            self.logger.warning(f"Failed to setup bisect: {str(e)}")

        try:
            # Submit the HEAD test job before bisect starts
            self.head_job_info = self._submit_head_job()

            # Use --no-checkout to avoid checking out files (improves performance)
            start_command = f"git -C {self.work_dir} bisect start --no-checkout {self.bad_commit} {self.good_commit}"
            start_result = subprocess.getoutput(start_command)

            # git prints "Bisecting: N revisions left..." on a successful start.
            pattern = r"Bisecting:"
            if re.search(pattern, start_result):
                self.logger.info("Start Bisecting")
                if self.metric:
                    # Validate all parameters before launching the run script
                    if self.mid_point is None or self.direction is None:
                        error_msg = f"Invalid mid_point parameters: mid_point={self.mid_point}, direction={self.direction}"
                        self.logger.error(error_msg)
                        raise ValueError(error_msg)

                    run_command = [
                        "git", "-C", self.work_dir, "bisect", "run",
                        BisectConfig.BISECT_MIDPOINT_SCRIPT,
                        str(self.bad_job_id),
                        str(self.metric),
                        str(self.temp_result_root),
                        str(float(self.mid_point)),  # pass mid_point
                        str(int(self.direction)),    # pass direction (1 or -1)
                        str(self.session_id),  # pass the session ID
                    ]
                else:
                    run_command = ["git", "-C", self.work_dir, "bisect", "run", BisectConfig.BISECT_RUN_SCRIPT, self.bad_job_id, error_id, self.temp_result_root, str(self.session_id)]

                self.logger.info(f"Executing bisect run command: {' '.join(run_command)}")
                bisect_result = self.run_command_real_time(run_command)

                # Enhanced result validation with detailed error reporting
                if bisect_result:
                    result_length = len(bisect_result.strip())
                    self.logger.info(f"Bisect completed with result (length: {result_length} chars)")
                    self.bisect_end_time = int(time.time())

                    # Save the complete bisect log
                    self._save_complete_bisect_log(bisect_result)

                    return bisect_result
                else:
                    # Empty or None result - provide detailed diagnostic info
                    error_details = [
                        "Git bisect run completed but returned empty result",
                        f"Work directory: {self.work_dir}",
                        f"Commit range: {self.good_commit[:8]}..{self.bad_commit[:8]}",
                        f"Bisect mode: {'performance' if self.metric else 'error'}",
                    ]

                    if self.metric:
                        error_details.append(f"Metric: {self.metric}, mid_point: {self.mid_point}, direction: {self.direction}")
                    else:
                        error_details.append(f"Error ID: {self.error_id}")

                    # Check git bisect log for clues
                    try:
                        git_log_cmd = ["git", "-C", self.work_dir, "bisect", "log"]
                        git_log = subprocess.run(git_log_cmd, capture_output=True, text=True, timeout=30)
                        if git_log.stdout:
                            error_details.append(f"Git bisect log:\n{git_log.stdout[:500]}")
                    except Exception as log_err:
                        error_details.append(f"Could not retrieve git bisect log: {log_err}")

                    error_msg = "\n".join(error_details)
                    self.logger.error(error_msg)
                    raise RuntimeError(f"Bisect run returned empty result. {error_details[0]}")
            else:
                # Failed to start bisect - provide detailed error
                error_msg = (
                    f"Failed to start git bisect.\n"
                    f"Command: {start_command}\n"
                    f"Output: {start_result}\n"
                    f"Expected pattern 'Bisecting:' not found in output."
                )
                self.logger.error(error_msg)
                raise RuntimeError(f"Git bisect start failed: {start_result[:200]}")

        except subprocess.CalledProcessError as e:
            error_msg = (
                f"Bisect process failed with exit code {e.returncode}\n"
                f"Command: {e.cmd}\n"
                f"Commit range: {self.good_commit[:8]}..{self.bad_commit[:8]}\n"
                f"Work directory: {self.work_dir}\n"
                f"Output: {e.output if hasattr(e, 'output') else 'N/A'}"
            )
            self.logger.error(error_msg)
            raise RuntimeError(f"Git bisect subprocess failed (exit code {e.returncode})") from e

    def init_job_content(self, job_id):
        """
        Initialize the job content by fetching job information from database.

        :param job_id: The ID of the job to initialize
        :return: A dictionary containing the job content
        :raises ValueError: If job not found or invalid format
        :raises Exception: If database error occurs
        """
        if not job_id or not isinstance(job_id, str) or not job_id.isdigit():
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        try:
            job_json = self.bisect_db.get_job_info(job_id)
            if not job_json:
                raise ValueError(f"Job {job_id} not found in database")

            # Validate job JSON structure
            if not isinstance(job_json, dict):
                raise ValueError(f"Invalid job data format for {job_id}")

            # Validate required fields
            required_fields = ['program', 'ss']
            if not any(field in job_json for field in required_fields):
                raise ValueError("Invalid job format: missing program/ss section")

            # Initialize the job content with additional fields
            job = {
                **job_json,
                'bad_job_id': job_id,
                'testbox': job_json.get('tbox_group'),
                'runtime': 7200  # Default runtime value
            }

            # Remove necessary keys
            # 注意：不要删除 pp 和 ss 字段，它们是 MD5 计算的核心参数
            # pp (program parameters) 和 ss (system software) 用于识别 job 配置
            black_lists = ['SCHED_HOST', 'SCHED_PORT', 'DATA_API_PORT', 'lab_id',
                'git', 'install_os_packages_all', 'install_pip_packages_all',
                'install_gem_packages_all', 'install_pkgbuild_packages_all',
                'job2sh', 'LKP_CGI_PORT', 'DNS_HOST', 'LKP_SERVER', 'GIT_SERVER',
                'OS_HTTP_HOST', 'OS_HTTP_PORT', 'SRV_HTTP_RESULT_HOST',
                'SRV_HTTP_OS_HOST', 'SRV_HTTP_GIT_HOST', 'SRV_HTTP_CCI_HOST',
                'SRV_HTTP_RESULT_PORT', 'SRV_HTTP_OS_PORT', 'SRV_HTTP_GIT_PORT',
                'SRV_HTTP_CCI_PORT', 'SEND_MAIL_HOST', 'SEND_MAIL_PORT', 'INITRD_HTTP_HOST',
                'INITRD_HTTP_PORT', 'ASSIST_RESULT_HOST', 'ASSIST_RESULT_PORT',
                'ASSISTANT_HOST', 'ASSISTANT_PORT', 'RESULT_WEBDAV_HOST',
                'RESULT_WEBDAV_PORT', 'DOCKER_REGISTRY_HOST', 'DOCKER_REGISTRY_PORT',
                'JUMPER_HOST', 'JUMPER_PORT', 'MONITOR_HOST', 'MONITOR_PORT', 'MQ_HOST',
                'MQ_PORT', 'GIT_MIRROR_HOST', 'REMOTE_GIT_HOST', 'REMOTE_GIT_PORT',
                'UPDATE_REPO_HOST', 'UPDATE_REPO_PORT', 'SRV_HTTP_OS_REPO_HOST',
                'SRV_HTTP_OS_REPO_PORT',
                'tbox_group', 'submit_id', 'nr_run', 'osv', 'docker_image', 'id', 'job_state',
                'job_stage', 'emsx', 'os_mount', 'tbox_type', 'submit_date', 'rootfs', 'result_root',
                'upload_dirs', 'result_service', 'subqueue', 'submit_time', 'memory_minimum', 'time', 'initrd_deps', 'initrd_pkgs',
                'initrds_uri', 'added_by', 'hw', 'install_os_packages', 'services', 'monitors',
                'pkg_data', 'mount_repo_name', 'mount_repo_priority',
                'external_mount_repo_addr', 'deadline', 'mount_repo_addr',
                'last_success_stage', 'host_machine', 'external_mount_repo_name',
                'external_mount_repo_priority', 'boot_time', 'boot_seconds',
                'running_time', 'post_run_time', 'loadavg', 'end_time', 'start_time',
                'run_seconds', 'finish_time', 'errid', 'lab', 'my_account', 'os_arch', 'arch',
                'job_token', 'dispatch_time', 'setup_time', 'job_data_readiness', 'uploading_time', 'uploaded_time',
                'complete_time', 'need_file_store', 'host_keys', 'kernel_param',
                'all_params_md5']  # 删除 all_params_md5，强制动态重新计算以确保准确性

            # Note: 'stats' is intentionally NOT in black_lists because:
            # - Performance bisect needs stats to get metric values
            # - 'errid' is also excluded from black_lists for error bisect

            for key in black_lists:
                if key in job:
                    del job[key]

            # --- [ADDED] ---
            # TODO: This is a temporary patch and should be removed later.
            # Remove the 'branch' parameter to ensure bisect uses only the commit hash
            if 'program' in job and isinstance(job.get('program'), dict):
                if 'makepkg' in job['program'] and isinstance(job['program'].get('makepkg'), dict):
                    if 'branch' in job['program']['makepkg']:
                        del job['program']['makepkg']['branch']
                        self.logger.info("Removed 'branch' parameter from program.makepkg for bisect jobs.")

            return job

        except Exception as e:
            self.logger.error("Failed to initialize job content",
                                       job_id=job_id,
                                       error=str(e),
                                       stack_trace=traceback.format_exc())
            raise

    def check_existing_completed_jobs(self, job, limit: int = 1):
        """
        Check for existing completed jobs with same configuration

        Args:
            job: Job configuration to check
            limit: Maximum number of completed jobs to return

        Returns:
            List of (job_id, result_root) tuples for completed jobs, sorted by recency
        """
        try:
            # Always generate MD5 for the job if not present
            if not job.get('all_params_md5'):
                job['all_params_md5'] = calculate_all_params_md5(job)

            # Always use MD5-based duplicate detection
            all_jobs = self.bisect_db.check_existing_job_by_md5(job, limit=limit * 3)

            if not all_jobs:
                self.logger.debug("MD5 lookup failed, trying field matching fallback")
                all_jobs = self.bisect_db.check_existing_job(job, limit=limit * 3)

            if not all_jobs:
                self.logger.debug("No existing jobs found for configuration")
                return []

            completed_jobs = []
            
            for job_id, result_root in all_jobs:
                try:
                    # Get job details to check completion status
                    job_info = self.bisect_db.get_job_info(job_id)
                    if not job_info:
                        self.logger.debug(f"Job {job_id} info not found, skipping")
                        continue
                    
                    # Check if job is completed
                    job_stage = job_info.get('job_stage')
                    job_data_readiness = job_info.get('job_data_readiness')
                    
                    # Job is considered completed if:
                    # 1. job_stage == 'finish'
                    # 2. job_data_readiness == 'complete'
                    if (job_stage == 'finish' and 
                        job_data_readiness == 'complete'):
                        
                        completed_jobs.append((job_id, result_root))
                        self.logger.debug(f"Found completed job: {job_id}")
                        
                        # Stop when we have enough completed jobs
                        if len(completed_jobs) >= limit:
                            break
                    else:
                        self.logger.debug(f"Job {job_id} not completed: stage={job_stage}, readiness={job_data_readiness}")
                        
                except Exception as e:
                    self.logger.warning(f"Error checking job {job_id}: {str(e)}")
                    continue
            
            self.logger.debug(f"Found {len(completed_jobs)} completed jobs out of {len(all_jobs)} total jobs")
            return completed_jobs
            
        except Exception as e:
            self.logger.error(f"Error checking existing completed jobs: {str(e)}")
            return []

    def record_jobs(self, job_info: list, job_commit: str, status: str, metric_value: Optional[float] = None) -> None:
        """
        Record job information in JSON Lines format.
        Args:
            job_info: List of job details
            job_commit: Associated commit hash
            status: Job status ('good', 'bad', 'skip')
            metric_value: Optional performance metric value (for performance bisect)
        """
        # Create result directory if not exists [1]
        os.makedirs(self.temp_result_root, exist_ok=True)

        # Construct normalized file path
        commit_jobs_path = os.path.join(self.temp_result_root, 'commit_jobs.jsonl')

        # Build record with type hints
        record: dict[str, Any] = {
            "commit": job_commit,
            "job_id": job_info[0],
            "job_result_root": job_info[1],
            "status": status,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        }

        # For performance bisect, include the metric value
        if metric_value is not None:
            record["metric_value"] = metric_value

        # Atomic write with proper encoding and error handling
        try:
            with open(commit_jobs_path, 'a', encoding='utf-8') as f:
                # Write compact JSON with ASCII control
                f.write(json.dumps(record, ensure_ascii=False) + '\n')
        except IOError as e:
            self.logger.error(f"Failed to write job record: {str(e)}")

    def submit_job(self, job, success_limit: int = 1, force: bool = False) -> tuple:
        """
        Submit job and return (job_id, result_root) tuple with deduplication.
        To force a new submission, set the 'force' parameter to True.

        Args:
            job: Job configuration to submit
            success_limit: Minimum number of completed jobs required to skip submission
            force: If True, skips the check for existing jobs and submits a new one.

        Returns:
            tuple: (job_id, result_root) for the job (existing or new)
        """
        try:
            # 创建 job 的深拷贝，避免修改原始对象
            job_copy = copy.deepcopy(job)

            # Check for existing completed jobs, unless forced to resubmit
            if not force:
                completed_jobs = self.check_existing_completed_jobs(job_copy, success_limit)

                if completed_jobs and len(completed_jobs) >= success_limit:
                    # Return the most recent completed job
                    latest_job = completed_jobs[0]  # Already sorted by recency
                    self.logger.debug(f"Found {len(completed_jobs)} completed jobs, reusing latest: {latest_job[0]}")
                    return latest_job

            # If not enough completed jobs exist, or if resubmission is forced, submit a new job
            if force:
                self.logger.info("Forcing new job submission because 'force' parameter was True.")
            else:
                completed_jobs_count = len(self.check_existing_completed_jobs(job_copy, success_limit) or [])
                self.logger.info(f"Not enough completed jobs found ({completed_jobs_count}/{success_limit}), submitting a new job.")

            job_info = self._submit_new_job(job_copy)
            if not job_info:
                raise JobSubmissionError("Job submission failed")

            self.logger.info(f"Successfully submitted new job: {job_info[0]}")
            return job_info
        finally:
            self._submitting_job = False

    def _submit_new_job(self, job):
        """
        Submit a job and wait for the response. The generated YAML file is temporary
        and will be cleaned up after submission, but preserved on failure.

        :param job: A dictionary containing job details
        :return: The job ID and result_root if submission is successful, otherwise None
        :raises JobSubmissionError: on invalid job object, non-zero submit
            return code, or unparseable submit output
        """
        # 1. Robustness Check: Ensure the job object is valid before proceeding.
        if not job or not isinstance(job, dict):
            error_msg = "Attempted to submit an invalid or empty job object."
            self.logger.error(error_msg)
            raise JobSubmissionError(error_msg)

        # 2. Unique Filename Generation: Create a unique YAML file to avoid overwrites.
        commit = self._get_commit_from_job(job) or "unknown_commit"

        # Build a high-precision unique identifier.
        timestamp_ms = int(time.time() * 1000)  # millisecond timestamp
        process_id = os.getpid()  # process ID
        random_suffix = random.randint(1000, 9999)  # random suffix

        # Use a longer commit-hash prefix to reduce filename collisions.
        commit_hash = commit[:12] if len(commit) >= 12 else commit

        temp_yaml_filename = f"bisect_job_{commit_hash}_{timestamp_ms}_{process_id}_{random_suffix}.yaml"

        # Avoid LKP path-joining problems: its find_jobfile duplicates
        # segments when handed certain paths.
        # NOTE(review): the original comment claimed a CWD-relative path is
        # used, but abspath() produces an absolute one — confirm intent.
        temp_yaml_path = os.path.abspath(temp_yaml_filename)

        try:
            # Write the job YAML to the unique temporary file
            with open(temp_yaml_path, 'w') as f:
                yaml.dump(job, f, sort_keys=False)
            self.logger.info(f"Generated temporary job file: {temp_yaml_path}")

            # Submit the job and wait for the response
            # NOTE(review): comment mentions --no-pack (skip LKP_SRC delta
            # packing) but the flag is not actually passed below — confirm.
            lkp_src = os.environ['LKP_SRC']

            # Use subprocess.run with explicit cwd to ensure proper working directory
            result = subprocess.run(
                [f"{lkp_src}/sbin/submit", temp_yaml_path],
                cwd=lkp_src,
                capture_output=True,
                text=True,
                env=os.environ.copy()
            )
            retcode = result.returncode
            response = result.stdout + result.stderr

            if retcode == 0:
                try:
                    # Parse "got job id=<N>" and "result_root <path>" lines
                    # from the submit tool's output.
                    job_id, result_root = None, None
                    for line in response.split('\n'):
                        if 'got job id=' in line:
                            match = re.search(r'id=(\d+)', line)
                            if match:
                                job_id = match.group(1)
                        elif line.startswith('result_root '):
                            result_root = line.split(' ', 1)[1].strip()
                    
                    if job_id and result_root:
                        return [job_id, result_root]
                    else:
                        # Raised deliberately so the except IndexError below
                        # converts it into a JobSubmissionError.
                        raise IndexError("Could not parse job_id and result_root from submit output.")
                except IndexError:
                    self.logger.error(f"Error parsing job ID from response:\n{response}")
                    # On parsing failure, we consider it a submission failure.
                    # The temp file will be preserved by the exception handling below.
                    raise JobSubmissionError("Failed to parse submit command output.")
            else:
                # Construct command string for error message
                submit_cmd_str = f"{lkp_src}/sbin/submit {temp_yaml_path}"
                error_msg = (
                        f"Job submission failed with return code {retcode}\n"
                        f"Command: {submit_cmd_str}\n"
                        f"Job file '{temp_yaml_path}' has been preserved for inspection.\n"
                        "Error output:\n"
                        f"{'-'*40}\n"
                        f"{response}\n"
                        f"{'-'*40}"
                    )
                self.logger.error(error_msg)
                # Preserve the failed YAML file by not deleting it.
                raise JobSubmissionError(f"Submission failed for {temp_yaml_path}")

        except Exception as e:
            # Re-raise any exception to be handled by the caller.
            # The key is that we don't enter the 'finally' block's deletion
            # logic if an error occurred.
            self.logger.error(f"Error during job submission process for {temp_yaml_path}: {e}")
            raise
        finally:
            # 3. Cleanup: keep failed YAML files around for debugging.
            # Delete the YAML only after a successful submission (retcode 0),
            # and only when not overridden by the env var below.
            should_delete = (
                'retcode' in locals() and
                retcode == 0 and
                os.path.exists(temp_yaml_path) and
                # More conditions (e.g. final job state) could be added here.
                os.getenv('BISECT_KEEP_YAML_FILES', '0') != '1'  # env-var override
            )

            if should_delete:
                os.remove(temp_yaml_path)
                self.logger.debug(f"Cleaned up temporary job file: {temp_yaml_path}")
            else:
                self.logger.info(f"Keeping job file for debugging: {temp_yaml_path}")


    def wait_for_status(self, job_id: str, result_root: str, metric_criteria: dict) -> str:
        self.logger.info('wait for status')
        """Wait for job completion and determine status"""
        job_stats, job_health = self._poll_job_stats(job_id, result_root)
        return self._determine_status(job_stats, job_health, result_root, metric_criteria)

    def submit_wait_job_status(self, job: Dict[str, Any], stat: str) -> str:
        """Submit a job, wait for it to finish, and return its bisect status.

        This is the main entry point used by the bisect loop.

        :param job: job configuration to submit
        :type job: Dict[str, Any]
        :param stat: performance metric name (perf bisect) or error id (error bisect)
        :type stat: str
        :return: one of 'good', 'bad', 'skip', 'error'
        :rtype: str
        """
        is_perf = self.metric and stat == self.metric
        # Entry trace for debugging bisect decisions.
        self.logger.info("Entering submit_wait_job_status",
                    job_keys=list(job.keys()),
                    stat=stat,
                    is_perf=is_perf,
                    mid_point=self.mid_point if is_perf else None,
                    direction=self.direction if is_perf else None)

        try:
            stripped = stat.strip('"')
            # Criteria shape depends on whether we bisect a metric or an error id.
            criteria = {
                'type': 'perf' if is_perf else 'error',
                'metric': stripped if is_perf else None,
                'error_id': None if is_perf else stripped,
            }

            # Run the job and classify its outcome.
            job_id, result_root = self.submit_job(job)
            job_stats, job_health = self._poll_job_stats(job_id, result_root)
            status = self._determine_status(job_stats, job_health, result_root, criteria)

            # For perf bisects, also capture the raw measured value (best effort).
            metric_value = None
            if is_perf:
                try:
                    metric_value = float(job_stats[criteria['metric']])
                except (KeyError, ValueError, TypeError):
                    metric_value = None

            # Persist the result, including the perf value when available.
            commit = self._get_commit_from_job(job)
            self.record_jobs((job_id, result_root), commit, status, metric_value)

            self.logger.info(f"Final status: {status}")
            return status

        except (JobSubmissionError, JobStatusTimeoutError) as e:
            self.logger.error(f"Bisect process failed: {str(e)}")
            return 'error'


    def _poll_job_stats(self, job_id: str, result_root: Optional[str] = None) -> Tuple[Dict[str, Any], str]:
        """Poll a job's status until it reaches a terminal state.

        Combines a growing poll interval (to throttle DB queries) with an
        exponential backoff used both between idle polls and on errors.

        :param job_id: job ID to poll
        :param result_root: job result directory (optional); when given, also
            waits for the build log to appear before returning
        :return: tuple of (job stats dict, job health string); stats is empty
            for aborted/cancelled/not-run jobs
        :raises JobStatusTimeoutError: when the overall wait exceeds
            BisectConfig.JOB_POLL_TIMEOUT or the error-retry budget is spent
        """
        # NOTE(review): the JobStatusError raised below for a missing job is
        # caught by the generic `except Exception` handler at the bottom and
        # retried; it never propagates to the caller — confirm this is intended
        # (it may be, since the job record can lag right after submission).

        # Poll-interval control.
        poll_interval = 30  # initial poll interval: 30 seconds
        last_poll_time = 0  # timestamp of the previous DB poll
        consecutive_no_progress = 0  # consecutive idle polls (diagnostic only)

        # Exponential-backoff parameters (idle sleeps and error retries).
        current_delay = 5
        max_delay = 100
        start_time = time.time()
        poll_count = 0

        self.logger.info(f"Starting to poll job {job_id} status")

        while time.time() - start_time < BisectConfig.JOB_POLL_TIMEOUT:
            current_time = time.time()

            # Throttle: only query the DB once per poll_interval.
            if current_time - last_poll_time < poll_interval:
                # Interval not yet reached — sleep briefly and re-check.
                sleep_time = min(poll_interval // 2, 10)
                time.sleep(sleep_time)
                continue

            # Record the time of this poll.
            last_poll_time = current_time

            try:
                poll_count += 1

                # Cached query helper.
                job_content = self.bisect_db.get_job_info(job_id)
                if not job_content:
                    # NOTE(review): swallowed by the handler below and retried.
                    raise JobStatusError(f"Job {job_id} not found")

                # Check whether job_stage reached a terminal state.
                job_stage = job_content.get('job_stage', 'N/A')
                readiness = job_content.get('job_data_readiness', 'N/A')
                job_health = job_content.get('job_health', 'unknown')

                self.logger.debug(f"Job {job_id} status - stage: {job_stage}, readiness: {readiness}, health: {job_health}")

                # Normal completion.
                if job_stage == 'finish' and readiness == 'complete':
                    stats = job_content.get('stats', {})
                    self.logger.info(f"Job {job_id} status ready (finished and complete)")

                    # If result_root was supplied, wait for the build log
                    # upload to finish before returning (best effort).
                    if result_root:
                        try:
                            self.logger.info(f"Waiting for build log in {result_root}")
                            if not self._wait_for_build_log(result_root, timeout=600):
                                self.logger.warning(f"Build log not found after waiting 600s, proceeding anyway")
                        except Exception as e:
                            self.logger.error(f"Error during build log check: {e}")

                    self.logger.info(f"Returning from _poll_job_stats for job {job_id}")
                    return stats, job_health

                # Aborted/cancelled/never-ran states: return empty stats.
                elif job_stage in ('abort_wait', 'cancel') or readiness in ('incomplete', 'norun'):
                    self.logger.info(f"Job {job_id} terminated - stage: {job_stage}, readiness: {readiness}")
                    return {}, job_health

                # Still running: stretch the poll interval.
                consecutive_no_progress += 1
                # Grow the interval 50% per idle poll, capped at 300s (5 min).
                poll_interval = min(300, int(poll_interval * 1.5))
                self.logger.debug("Job still in progress, increasing poll interval",
                            job_id=job_id,
                            consecutive_no_progress=consecutive_no_progress,
                            new_interval=poll_interval)

                # Exponential backoff between iterations.
                time.sleep(current_delay)
                current_delay = min(current_delay * 1.5, max_delay)

            except Exception as e:
                self.logger.error(f"Error in poll loop for job {job_id}: {type(e).__name__}: {str(e)}")
                if current_delay >= max_delay:
                    raise JobStatusTimeoutError(f"Timeout waiting for job {job_id}")
                time.sleep(current_delay)

        raise JobStatusTimeoutError(f"Job {job_id} exceeded maximum wait time")

    def _determine_status(self, stats: dict, job_health: str, result_root: str, criteria: dict) -> str:
        """Route status determination to the perf or error-id checker.

        :param stats: job statistics dict
        :param job_health: scheduler health string for the job
        :param result_root: job result directory
        :param criteria: dict with 'type', 'metric' and 'error_id' keys
        :return: 'good', 'bad' or 'skip'
        :raises Exception: re-raises whatever the underlying checker raises
        """
        try:
            if criteria['type'] == 'perf':
                status = self._check_perf_metric(stats, criteria['metric'])
                self.logger.debug("Performance status determined",
                           status=status,
                           metric=criteria['metric'],
                           stats=stats)
            else:
                status = self._check_error_id(stats, criteria['error_id'], job_health, result_root)
                self.logger.debug("Error ID status determined",
                            status=status,
                            error_id=criteria['error_id'],
                            stats=stats)
            return status

        except Exception as e:
            # Log full context before propagating to the caller.
            self.logger.error("Status determination failed",
                        error=str(e),
                        traceback=traceback.format_exc(),
                        criteria=criteria,
                        stats=stats)
            raise

    def _check_perf_metric(self, stats, metric) -> str:
        """
        使用mid_point + direction逻辑评估性能指标状态

        逻辑:
        - direction = -1 (lower is better): measured < mid_point → GOOD
        - direction = 1 (higher is better): measured > mid_point → GOOD
        """
        value = stats.get(metric)
        if value is None:
            self.logger.error(f"Metric {metric} not found in job stats")
            return 'skip'

        try:
            measured = float(value)

            # 检查 mid_point 和 direction 是否已设置
            if self.mid_point is None or self.direction is None:
                self.logger.error("mid_point or direction not set for performance evaluation")
                return 'skip'

            self.logger.info(f"Performance check: measured={measured}, mid_point={self.mid_point}, direction={self.direction}")

            # 使用 direction 系数判断
            # (measured - mid_point) * direction > 0 表示朝着好的方向
            if (measured - self.mid_point) * self.direction > 0:
                self.logger.info("Performance is on the good side → GOOD")
                return 'good'
            else:
                self.logger.info("Performance is on the bad side → BAD")
                return 'bad'

        except (ValueError, TypeError) as e:
            self.logger.error(f"Invalid metric value {value} for {metric}: {e}")
            return 'skip'

    def _was_file_compiled(self, errid: str, result_root: str) -> Optional[bool]:
        """
        Check if the file related to the errid was compiled by checking the build log.
        Handles different errid formats, including 'makepkg'.

        Args:
            errid: The error ID, expected to contain a file path.
            result_root: The result directory where the build log is located.

        Returns:
            True if the file is found in the build log,
            False if the build log exists but the file is not mentioned,
            None if the build log cannot be found or the file path cannot be parsed.
        """
        file_path = None
        # Handle 'makepkg.eid.' format
        if errid.startswith('makepkg.eid.'):
            # Regex to capture file path ending in .c or .h from makepkg errids
            match = re.search(r'makepkg\.eid\.(.*?(\.c|\.h)):', errid)
            if match:
                file_path = match.group(1)
        else:
            # Handle standard "file:line" errid format
            match = re.match(r'([^:]+):\d+', errid)
            if match:
                file_path = match.group(1)

        if not file_path:
            self.logger.warning(f"Could not parse file path from errid: {errid}")
            return None

        self.logger.debug(f"Extracted file path from errid: '{file_path}'")

        # Check for build log files - prioritize makepkg (build-specific) over output (general)
        log_candidates = [os.path.join(result_root, 'makepkg'), os.path.join(result_root, 'output')]
        log_path_to_use = next((path for path in log_candidates if os.path.exists(path)), None)

        if not log_path_to_use:
            self.logger.warning(f"No build log (makepkg, output) found in: {result_root}")
            return None

        try:
            # Extract just the filename for more flexible matching
            file_basename = os.path.basename(file_path)
            self.logger.debug(f"Searching for file path '{file_path}' or basename '{file_basename}' in {log_path_to_use}")

            with open(log_path_to_use, 'r', encoding='utf-8', errors='ignore') as f:
                for line in f:
                    # Try both full path and basename matching
                    if file_path in line or file_basename in line:
                        self.logger.info(f"Found evidence of file compilation for '{file_path}' in {log_path_to_use}.")
                        return True
            self.logger.info(f"File '{file_path}' (or basename '{file_basename}') was not mentioned in {log_path_to_use}.")
            return False
        except Exception as e:
            self.logger.error(f"Failed to read or search build log {log_path_to_use}: {e}")
            return None

    def _wait_for_build_log(self, result_root: str, timeout: int = 600) -> bool:
        """
        Wait for build log files to appear in result_root with exponential backoff.

        Args:
            result_root: The result directory path
            timeout: Maximum time to wait in seconds (default: 600s = 10 minutes)

        Returns:
            True if build log is found, False if timeout
        """
        log_candidates = ['makepkg', 'output']
        start_time = time.time()
        poll_interval = 2  # Start with 2 seconds
        max_interval = 30  # Max 30 seconds between checks

        self.logger.info(f"Waiting for build log in {result_root} (timeout: {timeout}s)")

        while time.time() - start_time < timeout:
            for log_name in log_candidates:
                log_path = os.path.join(result_root, log_name)
                if os.path.exists(log_path) and os.path.getsize(log_path) > 0:
                    elapsed = int(time.time() - start_time)
                    self.logger.info(f"Build log found: {log_path} after {elapsed}s (size: {os.path.getsize(log_path)} bytes)")
                    return True

            elapsed = int(time.time() - start_time)
            self.logger.debug(f"Build log not found yet, waiting {poll_interval}s... ({elapsed}/{timeout}s elapsed)")

            time.sleep(poll_interval)

            # Exponential backoff: increase interval by 50%, up to max_interval
            poll_interval = min(max_interval, int(poll_interval * 1.5))

        self.logger.warning(f"Timeout waiting for build log after {timeout}s")
        return False

    def _check_error_id(self, stats: dict, error_id: str, current_job_health: str, result_root: str) -> str:
        """Decide the bisect status for an error-id bisect.

        Combines errid presence, overall job health, and (for build tasks)
        build-progress analysis.

        :param stats: job statistics (error ids, build_stage, build_time)
        :param error_id: the error id the bisect is looking for
        :param current_job_health: scheduler health ('success', 'fail', ...)
        :param result_root: job result directory used for log inspection
        :return: 'bad', 'good', or 'skip'
        """
        # Rule 1: the target error is present -> definitively bad.
        if self._has_error_id(stats, error_id):
            self.logger.info(f"errid '{error_id}' found. Result: bad")
            return 'bad'

        # Rule 2: a healthy job without the target error -> good.
        if current_job_health == 'success':
            self.logger.info("errid not found and job_health is 'success'. Result: good")
            return 'good'

        # From here on the job failed for some other reason and the target
        # errid did not show up.

        # Rule 3: for test tasks (functional AND performance), an unrelated
        # failure does not affect the bisect target, so the commit is good.
        # Performance bisects take a separate path (_check_perf_metric with
        # mid_point logic); in error-bisect mode only the error_id matters.
        if not self.is_build_task:
            self.logger.info("errid not found, job failed, but this is a test task (not build). Result: good")
            self.logger.debug("Test task failures without target errid don't affect bisect result")
            return 'good'

        # Rule 4: build/compile tasks need deeper analysis — the build may
        # have died before ever reaching the error location, so we inspect
        # build logs, build_stage and build_time.
        return self._check_build_task_failure(error_id, stats, result_root)

    def _check_build_task_failure(self, error_id: str, stats: dict, result_root: str) -> str:
        """
        Enhanced build task failure analysis using build_stage and build_time.

        This method uses multiple strategies to determine if a missing error
        in a failed build is truly 'good' or should be 'skip' or 'bad':
        1. Check if error evidence exists in log (even if not in stats)
        2. Check build_stage progress
        3. Compare build_time
        4. For others, treat missing error as BAD (not SKIP)

        Args:
            error_id: The error identifier to check
            stats: Job statistics including build metadata
            result_root: Path to job results

        Returns:
            'good', 'bad', or 'skip'
        """
        # GUARD: This method should ONLY be called for build tasks
        # If somehow called for a test task, return 'good' immediately
        if not self.is_build_task:
            self.logger.warning(f"_check_build_task_failure called for non-build task! Treating as test task: returning 'good'")
            self.logger.debug(f"Task detection: suite={self.bad_job.get('suite')}, has_ss={bool(self.bad_job.get('ss'))}")
            return 'good'

        # Extract build metadata from stats
        build_stage = stats.get('build_stage.max', 0)
        build_time = stats.get('build_time')

        # Ensure proper type conversion (stats values may arrive as strings)
        try:
            build_stage = int(build_stage) if build_stage else 0
        except (ValueError, TypeError):
            build_stage = 0

        try:
            build_time = int(build_time) if build_time else None
        except (ValueError, TypeError):
            build_time = None

        # If build_stage from stats is 0 or unreliable, try parsing from log directly
        if build_stage == 0 and result_root:
            build_stage = self._parse_build_stage_from_log(result_root)
            if build_stage > 0:
                self.logger.info(f"Parsed build_stage from log: {build_stage} (stats showed 0)")

        self.logger.info(f"Build task analysis: stage={build_stage}, time={build_time}s" if build_time else f"Build task analysis: stage={build_stage}, time=N/A")

        # Strategy 1: Check if error evidence exists in build log (NEW)
        # Even if error_id not in stats, check if similar errors exist in log
        if self._find_error_evidence_in_log(error_id, result_root):
            self.logger.info(f"Found error evidence in build log for '{error_id}'. Result: bad")
            return 'bad'

        # Strategy 2: Enhanced build progress detection
        # Don't just rely on build_stage number - check actual compilation progress
        phases = self._detect_compile_phase(result_root)

        self.logger.info(f"Build progress: stage={build_stage}, compile_started={phases['compile_started']}, "
                        f"link_started={phases['link_started']}, many_files_compiled={phases['many_files_compiled']}")

        # Stage reference:
        # 0-5: makepkg framework stages
        # 10-14: prepare phase (patches, config)
        # 20: build started (doesn't mean much progress yet)
        # 30-39: package phase

        # If build didn't even start (stage < 20), we can't trust the result
        if build_stage < 20:
            self.logger.warning(f"Build stage {build_stage} < 20, build didn't start. Result: skip")
            return 'skip'

        # If build completed successfully (reached packaging), missing error means good
        if build_stage >= 30:
            self.logger.info(f"Build stage {build_stage} >= 30, reached packaging phase. Missing error means good")
            return 'good'

        # Build stage 20 just means "build started", not "build made progress"
        # We need to check actual build progress by looking at the log content:
        # - Did linking/archiving start? (AR commands)
        # - Were many files compiled? (CC commands)
        # NOTE(review): at this point 20 <= build_stage < 30 is guaranteed by
        # the two guards above, so this condition is always true; it is kept
        # only as documentation of the stage band being handled.
        if build_stage >= 20:
            if phases['link_started']:
                self.logger.info(f"Build stage {build_stage}, linking/archiving started (AR commands found). "
                               f"Build made significant progress, missing error means good")
                return 'good'

            if phases['many_files_compiled']:
                self.logger.info(f"Build stage {build_stage}, many files compiled successfully. "
                               f"Build made significant progress, missing error means good")
                return 'good'

            # If build started but no clear progress indicators
            self.logger.info(f"Build stage {build_stage}, but no clear progress indicators (no AR, few CC commands). "
                           f"Build may have failed early. Continuing with other checks...")

        # Strategy 3: Check build_time threshold (if available)
        if build_time:
            # Get reference build time from bad_job if available
            reference_time = self._get_reference_build_time()
            if reference_time and reference_time > 0:
                time_ratio = float(build_time) / float(reference_time)
                if time_ratio < 0.3:  # Build completed less than 30% of normal time
                    self.logger.warning(f"Build time {build_time}s is only {time_ratio:.1%} of reference {reference_time}s. Result: skip")
                    return 'skip'
                elif time_ratio > 0.7:  # Build completed more than 70% of normal time
                    self.logger.info(f"Build time {build_time}s is {time_ratio:.1%} of reference, sufficient progress. Result: good")
                    return 'good'

        # Strategy 4: Final fallback - default to BAD for missing error in failed build
        # This should be rare now that we check build progress indicators
        # Only reached when: build didn't make enough progress AND build_time is inconclusive
        self.logger.warning(f"Build stage {build_stage}, but no clear progress indicators found. "
                          f"No evidence for error '{error_id}', defaulting to BAD for missing error in failed build")
        self.logger.warning(f"This is a rare case - consider manual verification")
        return 'bad'

    def _find_error_evidence_in_log(self, error_id: str, result_root: str) -> bool:
        """
        Search for error evidence in build log that matches the error_id pattern.
        This helps catch cases where the error occurred but wasn't captured in stats.

        Args:
            error_id: The error identifier to search for
            result_root: Path to the job's result directory

        Returns:
            True if evidence of the error is found in log, False otherwise
        """
        if not result_root or not os.path.exists(result_root):
            return False

        # Extract key patterns from error_id
        error_patterns = []

        # For file-based errors, extract the file path
        if ':' in error_id:
            file_part = error_id.split(':')[0]
            if file_part:
                # Remove makepkg.eid. prefix if present
                if file_part.startswith('makepkg.eid.'):
                    file_part = file_part[12:]  # Remove 'makepkg.eid.'
                error_patterns.append(file_part)

        # For specific error types, add common patterns
        if 'undefined reference' in error_id.lower():
            error_patterns.append('undefined reference')
        if 'multiple definition' in error_id.lower():
            error_patterns.append('multiple definition')
        if 'conflicting types' in error_id.lower():
            error_patterns.append('conflicting types')

        if not error_patterns:
            # If no specific patterns, try the whole error_id
            error_patterns.append(error_id)

        # Check build logs - prioritize makepkg (build-specific) over general logs
        log_files = ['makepkg', 'output', 'build-log', 'make.log']

        for log_name in log_files:
            log_path = os.path.join(result_root, log_name)
            if not os.path.exists(log_path):
                continue

            try:
                with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
                    content = f.read()

                    # Search for each pattern
                    for pattern in error_patterns:
                        if pattern in content:
                            # Additional check: ensure it's in an error context
                            # Look for error markers nearby
                            lines = content.split('\n')
                            for i, line in enumerate(lines):
                                if pattern in line:
                                    # Check current line and nearby lines for error indicators
                                    context = ' '.join(lines[max(0, i-2):min(len(lines), i+3)])
                                    if any(marker in context.lower() for marker in ['error:', 'error ', 'failed', 'fatal:']):
                                        self.logger.info(f"Found error evidence for '{pattern}' in {log_name}")
                                        return True

            except Exception as e:
                self.logger.debug(f"Error reading {log_path}: {e}")
                continue

        return False

    def _get_reference_build_time(self) -> Optional[int]:
        """
        Get reference build time from the original bad job or similar successful builds.

        Returns:
            Reference build time in seconds, or None if not available
        """
        # Try to get from bad_job stats
        if hasattr(self, 'bad_job') and self.bad_job:
            bad_job_stats = self.bad_job.get('stats', {})
            bad_build_time = bad_job_stats.get('build_time')
            if bad_build_time:
                self.logger.debug(f"Using bad_job build_time as reference: {bad_build_time}s")
                return int(bad_build_time)

        # Could also query database for average build times of similar jobs
        # This would require additional database queries

        # Default fallback: assume 30 minutes for kernel build
        default_time = 1800  # 30 minutes
        self.logger.debug(f"No reference build_time found, using default: {default_time}s")
        return default_time

    def _parse_build_stage_from_log(self, result_root: str) -> int:
        """
        Parse the maximum build_stage value directly from build log files.

        This is a fallback when build_stage.max stat is 0 or unavailable.

        Args:
            result_root: Path to the job's result directory

        Returns:
            Maximum build_stage value found in log, or 0 if not found
        """
        if not result_root or not os.path.exists(result_root):
            return 0

        # Check build log files - prioritize makepkg (build-specific) over output (general)
        log_candidates = ['makepkg', 'output']
        max_stage = 0

        for log_name in log_candidates:
            log_path = os.path.join(result_root, log_name)
            if not os.path.exists(log_path):
                continue

            try:
                with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
                    for line in f:
                        # Match lines like "build_stage: 20"
                        match = re.match(r'build_stage:\s*(\d+)', line)
                        if match:
                            stage = int(match.group(1))
                            max_stage = max(max_stage, stage)

                if max_stage > 0:
                    self.logger.debug(f"Found build_stage.max={max_stage} in {log_name}")
                    return max_stage

            except Exception as e:
                self.logger.debug(f"Error parsing build_stage from {log_path}: {e}")
                continue

        return 0

    def _can_verify_by_log(self, error_id: str) -> bool:
        """
        Check if this type of error can be verified by checking compilation logs.

        Args:
            error_id: The error identifier

        Returns:
            True if this error type can be verified via logs
        """
        # Errors that reference specific source files can be verified
        if re.match(r'.*\.(c|h|cpp|cc|S):', error_id):
            return True

        # makepkg errors with file references
        if error_id.startswith('makepkg.eid.') and ('.c:' in error_id or '.h:' in error_id):
            return True

        return False

    def _classify_errid_stage(self, errid: str) -> str:
        """
        根据 errid 内容判断应该在哪个编译阶段出现

        Args:
            errid: 错误ID字符串

        Returns:
            'config' - 配置阶段错误
            'compile' - 编译阶段错误
            'link' - 链接阶段错误
            'unknown' - 未知阶段错误
        """
        if not errid:
            return 'unknown'

        errid_lower = errid.lower()

        # 配置阶段错误模式
        config_patterns = [
            'kconfig', 'config', 'config_', 'missing-sysctl',
            'undefined_config', 'invalid_config', 'depends on',
            'selects', 'prompt', 'default', 'menuconfig'
        ]

        # 编译阶段错误模式
        compile_patterns = [
            '.c:', '.h:', 'error:', 'warning:', 'note:',
            'in function', 'in file', 'at line', 'expected',
            'syntax error', 'type error', 'declaration',
            'redefinition', 'conflicting types', 'implicit declaration'
        ]

        # 链接阶段错误模式
        link_patterns = [
            '.o', 'undefined reference', 'ld', 'linking',
            'multiple definition', 'relocation', 'section',
            'cannot find', 'no such file', 'archive', 'library'
        ]

        # 检查配置阶段错误
        if any(pattern in errid_lower for pattern in config_patterns):
            self.logger.debug(f"errid '{errid}' classified as config stage error")
            return 'config'

        # 检查链接阶段错误
        if any(pattern in errid_lower for pattern in link_patterns):
            self.logger.debug(f"errid '{errid}' classified as link stage error")
            return 'link'

        # 检查编译阶段错误
        if any(pattern in errid_lower for pattern in compile_patterns):
            self.logger.debug(f"errid '{errid}' classified as compile stage error")
            return 'compile'

        self.logger.debug(f"errid '{errid}' classified as unknown stage")
        return 'unknown'

    def _detect_compile_phase(self, result_root: str) -> Dict[str, bool]:
        """Detect which phases the build reached by scanning the build log.

        Args:
            result_root: path to the job's result directory

        Returns:
            Dict of phase flags: config_passed, compile_started,
            link_started, many_files_compiled. All False when no log exists
            or scanning fails.
        """
        phases = {
            'config_passed': False,    # configuration checks passed
            'compile_started': False,  # compilation started
            'link_started': False,     # linking/archiving started
            'many_files_compiled': False  # a large number of files was compiled
        }

        # Prefer the build-specific makepkg log over the generic output log.
        log_candidates = [os.path.join(result_root, 'makepkg'), os.path.join(result_root, 'output')]
        log_path = next((path for path in log_candidates if os.path.exists(path)), None)

        if not log_path:
            self.logger.debug("No build log found for phase detection")
            return phases

        try:
            with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()

            # Config passed: no obvious configuration errors AND compile commands were seen.
            config_errors = ['kconfig', 'invalid configuration', 'depends on', 'selects']
            has_config_errors = any(error in content.lower() for error in config_errors)

            # Compile started: CC or other build commands are present.
            # WRAP is used for header generation in kernel builds
            compile_markers = [' cc ', ' gcc ', 'clang', 'building', 'compiling']
            phases['compile_started'] = any(marker in content.lower() for marker in compile_markers)

            # Link/archive started: LD or AR commands are present.
            # AR (archiver) is used to create static libraries (.a files)
            # This indicates the build reached the linking/archiving stage
            link_markers = [
                ' ld ',           # Linker
                'linking',        # Generic linking message
                'undefined reference',  # Link-time error
            ]

            # Special check for AR (archive) commands - common in kernel builds
            # Look for patterns like "AR      drivers/hwmon/built-in.a"
            # NOTE(review): these substring tests are loose heuristics and can
            # match inside unrelated words (e.g. 'similar      ') — confirm the
            # false-positive rate is acceptable for this log format.
            content_lower = content.lower()
            has_ar_commands = (
                ' ar ' in content_lower or           # AR command itself
                'built-in.a' in content_lower or      # Archive file creation
                '\tar\t' in content_lower or          # Tab-separated AR command
                '\nar\t' in content_lower or          # Newline + AR command
                'ar      ' in content_lower           # AR with multiple spaces (kernel style)
            )

            phases['link_started'] = any(marker in content_lower for marker in link_markers) or has_ar_commands

            # Config passed when compilation started without config errors.
            phases['config_passed'] = phases['compile_started'] and not has_config_errors

            # Many-files heuristic: count .o references, CC and WRAP commands.
            o_file_count = content.count('.o ')
            cc_count = content.count('CC ')    # Count CC commands
            wrap_count = content.count('WRAP ')  # Count WRAP commands (header generation)

            # Consider "many files compiled" if:
            # - More than 50 .o file references, OR
            # - More than 30 CC commands, OR
            # - More than 20 WRAP commands (indicates build preparation progressed well)
            phases['many_files_compiled'] = (o_file_count > 50) or (cc_count > 30) or (wrap_count > 20)

            # Count AR commands for better diagnostics
            ar_count = content.count('AR ') + content.count('AR\t')

            self.logger.debug(f"Compile phase detection: config_passed={phases['config_passed']}, "
                           f"compile_started={phases['compile_started']}, link_started={phases['link_started']}, "
                           f"many_files_compiled={phases['many_files_compiled']} "
                           f"(o_files={o_file_count}, cc_cmds={cc_count}, wrap_cmds={wrap_count}, "
                           f"ar_cmds={ar_count}, has_ar={has_ar_commands})")

        except Exception as e:
            self.logger.warning(f"Failed to detect compile phase: {e}")

        return phases

    def _check_errid_by_stage(self, errid: str, result_root: str, job_health: str) -> str:
        """
        Decide good/bad/skip for a missing errid using stage-aware heuristics.

        Args:
            errid: error ID whose build stage drives the decision logic
            result_root: result directory used for compile-phase detection
            job_health: job health status (kept for interface compatibility)

        Returns:
            'good', 'bad', or 'skip'
        """
        stage = self._classify_errid_stage(errid)
        phases = self._detect_compile_phase(result_root)

        self.logger.info(f"Checking errid '{errid}' (stage: {stage}) with phases: {phases}")

        many_compiled = phases['many_files_compiled']

        if stage == 'config':
            # Config-stage error absent: if compilation actually started,
            # the configuration must have been accepted.
            if phases['compile_started']:
                self.logger.info("Config stage errid not found, but compile started. Result: good")
                return 'good'
            self.logger.warning("Config stage errid not found, but compile didn't start. Result: skip")
            return 'skip'

        if stage == 'compile':
            # Compile-stage error absent: trust the per-file check first,
            # then fall back to "many files compiled" as evidence the
            # build went deep enough to have surfaced the error.
            file_compiled = self._was_file_compiled(errid, result_root)
            if file_compiled is True:
                self.logger.info("Compile stage errid not found, file was compiled. Result: good")
                return 'good'
            if file_compiled is False:
                if many_compiled:
                    self.logger.info("Compile stage errid not found, file not compiled but many files compiled. Result: good")
                    return 'good'
                self.logger.warning("Compile stage errid not found, file not compiled. Result: skip")
                return 'skip'
            # Unknown compilation state for the file.
            if many_compiled:
                self.logger.info("Compile stage errid not found, file compilation unknown but many files compiled. Result: good")
                return 'good'
            self.logger.warning("Compile stage errid not found, file compilation unknown. Result: skip")
            return 'skip'

        if stage == 'link':
            # Link-stage error absent: reaching the link phase (or having
            # compiled a large number of files) counts as a clean pass.
            if phases['link_started']:
                self.logger.info("Link stage errid not found, but linking started. Result: good")
                return 'good'
            if many_compiled:
                self.logger.info("Link stage errid not found, linking not started but many files compiled. Result: good")
                return 'good'
            self.logger.warning("Link stage errid not found, linking not started. Result: skip")
            return 'skip'

        # Unknown stage: be conservative and skip.
        self.logger.warning("Unknown stage errid not found. Using conservative approach. Result: skip")
        return 'skip'

    def _has_error_id(self, stats, error_id) -> bool:
        """检查stats中是否存在指定的error_id - 仅精确匹配，排除性能指标"""
        if not error_id or not stats:
            return False

        # 检查是否存在于stats中
        if error_id not in stats:
            self.logger.debug(f"No match found for error_id: {error_id}")
            return False

        # 获取该字段的值
        value = stats[error_id]

        # 如果值是数值类型（int/float），则这是一个性能指标，不是错误ID
        # 错误ID的值通常是字符串或者为1（表示错误出现次数）
        if isinstance(value, (int, float)):
            # 如果值大于1或者是浮点数，几乎可以确定是性能指标
            if isinstance(value, float) or (isinstance(value, int) and value > 1):
                self.logger.debug(f"Field '{error_id}' has numeric value {value}, treating as performance metric, not error_id")
                return False

        # 其他情况认为是error_id
        self.logger.debug(f"Exact match found for error_id: {error_id}")
        return True
    
    def _get_commit_from_job(self, job) -> str:
        """Extract commit from job structure"""
        if job.get("ss"):
            return job["ss"]["linux"].get("commit")
        return job['program']['makepkg'].get('commit')

    def _get_current_head_commit(self) -> Optional[str]:
        """
        Resolve the current HEAD commit of the working repository.

        Returns:
            The HEAD commit hash, or None when git fails.
        """
        cmd = ['git', '-C', self.work_dir, 'rev-parse', 'HEAD']
        try:
            proc = subprocess.run(cmd, capture_output=True, text=True, check=True)
        except subprocess.CalledProcessError as e:
            self.logger.warning(f"Cannot get current HEAD commit: {e}")
            return None
        head_commit = proc.stdout.strip()
        self.logger.debug(f"Current HEAD commit: {head_commit[:8]}")
        return head_commit

    def _submit_head_job(self) -> Optional[Tuple[str, str]]:
        """
        Submit a test job for the repository's current HEAD commit.

        Returns:
            A (job_id, result_root) tuple, or None when submission fails.
        """
        try:
            head_commit = self._get_current_head_commit()
            if not head_commit:
                self.logger.warning("Cannot get HEAD commit, skipping head job submission")
                return None

            self.logger.info(f"Submitting head job for commit: {head_commit[:8]}")

            # Clone the bad-job template and point it at HEAD; the commit
            # lives in a different sub-dict depending on commit_field.
            head_job = copy.deepcopy(self.bad_job)
            target = (head_job['program']['makepkg']
                      if self.commit_field == "program.makepkg.commit"
                      else head_job['ss']['linux'])
            target['commit'] = head_commit

            job_id, result_root = self.submit_job(head_job, force=True)

            self.logger.info(f"Head job submitted successfully: {job_id}")
            self.logger.info(f"Head job result root: {result_root}")
            return job_id, result_root

        except Exception as e:
            self.logger.error(f"Failed to submit head job: {e}")
            return None

    def _check_head_job_status(self) -> Optional[Dict[str, Any]]:
        """
        Check how the previously submitted HEAD job executed.

        Returns:
            A dict describing the HEAD job outcome (commit, job_id, status,
            job_health, checked_at, optionally regressed_errids), or None
            when no HEAD job exists or the check fails.
        """
        if not self.head_job_info:
            self.logger.warning("No head job information available")
            return None

        try:
            job_id, result_root = self.head_job_info
            self.logger.info(f"Checking head job status: {job_id}")

            # Block until the job finishes and stats become available.
            job_stats, job_health = self._poll_job_stats(job_id, result_root)

            if not self.metric:
                # Functional test: decide via error-id analysis.
                status = self._check_error_id(job_stats, self.error_id, job_health, result_root)
            elif self.metric not in job_stats:
                # Performance test, but the metric never showed up.
                status = 'skip'
            else:
                value = float(job_stats[self.metric])
                if self.mid_point is None or self.direction is None:
                    # No threshold configured: fall back on job health.
                    status = 'good' if job_health == 'success' else 'bad'
                elif (value - self.mid_point) * self.direction > 0:
                    status = 'good'
                else:
                    status = 'bad'

            head_check_result = {
                'head_commit': self._get_current_head_commit(),
                'job_id': job_id,
                'status': status,
                'job_health': job_health,
                'checked_at': int(time.time())
            }

            # Functional regression: record which error IDs regressed.
            if not self.metric and status == 'bad':
                head_check_result['regressed_errids'] = self._get_regressed_error_ids(job_stats, result_root)

            self.logger.info(f"Head check result: status={status}, health={job_health}")
            return head_check_result

        except Exception as e:
            self.logger.error(f"Failed to check head job status: {e}")
            return None

    def _get_regressed_error_ids(self, stats: dict, result_root: str) -> List[str]:
        """
        获取回归的错误ID列表

        Args:
            stats: 任务统计信息
            result_root: 结果根目录

        Returns:
            回归的错误ID列表
        """
        try:
            # 从统计信息中提取错误ID
            errids = []
            if 'errid' in stats:
                if isinstance(stats['errid'], list):
                    errids.extend(stats['errid'])
                elif isinstance(stats['errid'], str):
                    errids.append(stats['errid'])

            # 从日志文件中提取错误ID
            log_files = ['makepkg', 'output', 'build-log', 'make.log']
            for log_name in log_files:
                log_file = os.path.join(result_root, log_name)
                if os.path.exists(log_file):
                    try:
                        with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
                            log_content = f.read()
                            # 简单的错误ID提取逻辑，可以根据实际情况扩展
                            if self.error_id and self.error_id in log_content:
                                errids.append(self.error_id)
                                break  # 找到后就停止检查其他日志文件
                    except Exception as e:
                        self.logger.debug(f"Failed to read log file {log_name}: {e}")
                        continue

            return list(set(errids))  # 去重

        except Exception as e:
            self.logger.warning(f"Failed to extract regressed error IDs: {e}")
            return []

    def _get_parent_commit(self, commit: str) -> Optional[str]:
        """
        Resolve the first parent of a commit via `git rev-parse <commit>^`.

        Args:
            commit: commit hash to start from

        Returns:
            The parent commit hash, or None when git fails.
        """
        try:
            proc = subprocess.run(
                ['git', '-C', self.work_dir, 'rev-parse', f'{commit}^'],
                capture_output=True,
                text=True,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            self.logger.error(f"Failed to get parent commit of {commit[:8]}: {e}")
            return None
        parent_commit = proc.stdout.strip()
        self.logger.debug(f"Parent of {commit[:8]} is {parent_commit[:8]}")
        return parent_commit

    def _verify_bisect_result(self, first_bad_commit: str) -> Dict[str, Any]:
        """
        Verify the accuracy of the bisect result — performance bisect only.

        Submits repeated jobs for first_bad_commit and its parent in parallel,
        waits for all of them, and checks that the bad commit is consistently
        'bad' and its parent consistently 'good'.

        Args:
            first_bad_commit: the first bad commit identified by bisect

        Returns:
            Verification result dictionary with keys: verified, first_bad_commit,
            parent_commit, bad_commit_verification, parent_commit_verification,
            head_validation, confidence, and a 'reason' explaining the outcome.
        """
        if not BisectConfig.ENABLE_RESULT_VERIFICATION:
            self.logger.info("Result verification is disabled by configuration")
            return {'verified': False, 'reason': 'disabled'}

        # Only performance bisect is verified; functional/build bisect skips
        # this path and is reported as trivially verified.
        if not self.metric:
            self.logger.info("Skipping verification for non-performance bisect")
            return {'verified': True, 'reason': 'not_applicable', 'confidence': 1.0}

        self.logger.info("=" * 80)
        self.logger.info("Starting performance bisect result verification (parallel mode)")
        self.logger.info(f"Verifying first_bad_commit: {first_bad_commit[:8]}")

        verification_result = {
            'verified': False,
            'first_bad_commit': first_bad_commit,
            'parent_commit': None,
            'bad_commit_verification': {},
            'parent_commit_verification': {},
            'head_validation': {},
            'confidence': 0.0
        }

        # Step 1: resolve the parent commit (the expected last-good commit).
        parent_commit = self._get_parent_commit(first_bad_commit)
        if not parent_commit:
            self.logger.error("Cannot get parent commit, verification failed")
            verification_result['reason'] = 'cannot_get_parent'
            return verification_result

        verification_result['parent_commit'] = parent_commit

        # Step 2: submit all verification jobs (bad, parent, head) in parallel.
        self.logger.info("Step 1: Submitting verification jobs in parallel...")
        verification_jobs = self._submit_all_performance_verification_jobs(
            first_bad_commit, parent_commit
        )

        if not verification_jobs:
            self.logger.error("Failed to submit verification jobs")
            verification_result['reason'] = 'job_submission_failed'
            return verification_result

        # Step 3: wait for every job to finish and collect the results.
        self.logger.info("Step 2: Waiting for all verification jobs to complete...")
        results = self._wait_and_analyze_performance_verification(verification_jobs)

        # Step 4: analyze the verification outcome.
        bad_details = results.get('bad_commit', {})
        parent_details = results.get('parent_commit', {})
        head_details = results.get('head_commit', {})

        verification_result['bad_commit_verification'] = bad_details
        verification_result['parent_commit_verification'] = parent_details
        verification_result['head_validation'] = head_details

        # The suspected bad commit must itself verify as consistently 'bad'.
        bad_passed = bad_details.get('verification_passed', False)
        bad_status = bad_details.get('final_status', 'unknown')

        if not bad_passed:
            self.logger.warning(f"Verification failed: {first_bad_commit[:8]} is not consistently 'bad'")
            self.logger.warning(f"Actual status: {bad_status}, Details: {bad_details}")
            verification_result['reason'] = 'bad_commit_not_bad'
            return verification_result

        # The parent commit must verify as consistently 'good'.
        parent_passed = parent_details.get('verification_passed', False)
        parent_status = parent_details.get('final_status', 'unknown')

        if not parent_passed:
            self.logger.warning(f"Verification failed: parent {parent_commit[:8]} is not consistently 'good'")
            self.logger.warning(f"Actual status: {parent_status}, Details: {parent_details}")
            verification_result['reason'] = 'parent_not_good'
            return verification_result

        # Overall confidence: mean of the two per-commit confidences
        # (falling back to 'stability' for older result shapes).
        bad_confidence = bad_details.get('confidence', bad_details.get('stability', 0))
        parent_confidence = parent_details.get('confidence', parent_details.get('stability', 0))
        overall_confidence = (bad_confidence + parent_confidence) / 2

        verification_result['verified'] = True
        verification_result['confidence'] = overall_confidence
        verification_result['reason'] = 'success'

        self.logger.info("=" * 80)
        self.logger.info(f"Verification PASSED with confidence: {overall_confidence:.2%}")
        self.logger.info(f"Confirmed: {first_bad_commit[:8]} is the first bad commit")
        self.logger.info(f"Parent {parent_commit[:8]} is good, {first_bad_commit[:8]} is bad")

        # HEAD validation is informational only; it does not affect 'verified'.
        if head_details.get('status'):
            self.logger.info(f"HEAD validation: {head_details.get('status')}")

        return verification_result

    def _submit_all_performance_verification_jobs(self, bad_commit: str, parent_commit: str) -> Dict[str, Any]:
        """
        Submit all performance-verification jobs (bad + parent + head) up front.

        The per-commit submission loops were previously duplicated verbatim;
        they now share the _submit_sample_jobs helper.

        Args:
            bad_commit: the bad commit to verify
            parent_commit: its parent (expected to be good)

        Returns:
            Dict with the commits, sample count and per-sample job info lists
            ('bad_jobs', 'parent_jobs', 'head_jobs').
        """
        samples = BisectConfig.PERF_VERIFICATION_SAMPLES
        self.logger.info(f"Submitting performance verification jobs: {samples} samples each for bad and parent commits")

        verification_jobs = {
            'bad_commit': bad_commit,
            'parent_commit': parent_commit,
            'samples': samples,
            'bad_jobs': [],
            'parent_jobs': [],
            'head_jobs': []
        }

        # Build one job template per commit under test; the commit lives in a
        # different sub-dict depending on commit_field.
        job_copy_bad = copy.deepcopy(self.bad_job)
        job_copy_parent = copy.deepcopy(self.bad_job)
        if self.commit_field == "program.makepkg.commit":
            job_copy_bad['program']['makepkg']['commit'] = bad_commit
            job_copy_parent['program']['makepkg']['commit'] = parent_commit
        else:
            job_copy_bad['ss']['linux']['commit'] = bad_commit
            job_copy_parent['ss']['linux']['commit'] = parent_commit

        # Submit every sample for both commits via the shared helper.
        verification_jobs['bad_jobs'] = self._submit_sample_jobs(
            job_copy_bad, bad_commit, samples, 'Bad commit')
        verification_jobs['parent_jobs'] = self._submit_sample_jobs(
            job_copy_parent, parent_commit, samples, 'Parent commit')

        # Reuse a previously submitted HEAD job when it targets a commit
        # different from the bad commit.
        if hasattr(self, 'head_job_info') and self.head_job_info:
            job_id, result_root = self.head_job_info
            head_commit = self._get_current_head_commit()
            if head_commit and head_commit != bad_commit:
                verification_jobs['head_jobs'].append({
                    'sample_idx': 1,
                    'job_id': job_id,
                    'result_root': result_root,
                    'commit': head_commit
                })
                self.logger.info(f"Using previously submitted HEAD job: {job_id}")

        self.logger.info(f"All verification jobs submitted: bad={len(verification_jobs['bad_jobs'])}, "
                        f"parent={len(verification_jobs['parent_jobs'])}, "
                        f"head={len(verification_jobs['head_jobs'])}")

        return verification_jobs

    def _submit_sample_jobs(self, job_template: dict, commit: str, samples: int, label: str) -> List[Dict[str, Any]]:
        """Submit `samples` copies of job_template for one commit; failed submissions are logged and skipped."""
        self.logger.info(f"Submitting {samples} jobs for {label.lower()} {commit[:8]}...")
        submitted = []
        for i in range(samples):
            try:
                job_id, result_root = self.submit_job(job_template, force=True)
                submitted.append({
                    'sample_idx': i + 1,
                    'job_id': job_id,
                    'result_root': result_root,
                    'commit': commit
                })
                self.logger.info(f"  {label} sample {i+1}/{samples} submitted: {job_id}")
            except Exception as e:
                self.logger.error(f"Failed to submit {label.lower()} sample {i+1}: {str(e)}")
        return submitted

    def _wait_and_analyze_performance_verification(self, verification_jobs: Dict[str, Any]) -> Dict[str, Any]:
        """
        Wait for all performance-verification jobs and analyze their results.

        Fixes a latent bug: `sample_idx` was assigned inside the try block, so
        a failure while reading job_info could raise NameError in the except
        handler; it is now read before the try. The previously duplicated
        wait/collect and analysis code for bad vs parent commits is shared via
        _collect_perf_samples / _analyze_perf_samples.

        Args:
            verification_jobs: dict from _submit_all_performance_verification_jobs

        Returns:
            Dict with 'bad_commit', 'parent_commit' and 'head_commit' analyses.
        """
        # Overall wall-clock budget for the whole verification phase.
        timeout = int(os.environ.get('BISECT_VERIFICATION_TIMEOUT', '3600'))
        start_time = time.time()
        samples = verification_jobs['samples']
        min_required_samples = max(1, samples // 2)

        # Collect metric samples for both commits.
        bad_values, bad_statuses, failed_bad = self._collect_perf_samples(
            verification_jobs['bad_jobs'], 'bad commit', samples, start_time, timeout)
        parent_values, parent_statuses, failed_parent = self._collect_perf_samples(
            verification_jobs['parent_jobs'], 'parent commit', samples, start_time, timeout)

        results = {
            'bad_commit': self._analyze_perf_samples(
                bad_values, bad_statuses, failed_bad, 'bad', min_required_samples),
            'parent_commit': self._analyze_perf_samples(
                parent_values, parent_statuses, failed_parent, 'good', min_required_samples),
            'head_commit': {}
        }

        # Process the HEAD commit job, when one was recorded.
        if verification_jobs['head_jobs']:
            try:
                head_job = verification_jobs['head_jobs'][0]
                job_id = head_job['job_id']
                result_root = head_job['result_root']
                head_commit = head_job['commit']

                self.logger.info(f"Checking HEAD commit job: {job_id}")
                job_stats, job_health = self._poll_job_stats(job_id, result_root)

                if self.metric in job_stats:
                    value = float(job_stats[self.metric])
                    status = self._classify_perf_value(value, job_health)
                    results['head_commit'] = {
                        'head_commit': head_commit,
                        'job_id': job_id,
                        'value': value,
                        'status': status,
                        'job_health': job_health
                    }
                    self.logger.info(f"HEAD commit: value={value:.2f}, status={status}")
            except Exception as e:
                self.logger.error(f"Failed to check HEAD commit: {str(e)}")
                results['head_commit'] = {'error': str(e)}

        return results

    def _classify_perf_value(self, value: float, job_health: str) -> str:
        """Map a metric value to 'good'/'bad' via mid_point/direction; fall back to job health."""
        if self.mid_point is not None and self.direction is not None:
            return 'good' if (value - self.mid_point) * self.direction > 0 else 'bad'
        return 'good' if job_health == 'success' else 'bad'

    def _collect_perf_samples(self, jobs: List[Dict[str, Any]], label: str, samples: int,
                              start_time: float, timeout: int) -> Tuple[List[float], List[str], List[Tuple]]:
        """Wait for each job in `jobs`; return (metric values, statuses, failed samples)."""
        self.logger.info(f"Waiting for {label} verification jobs...")
        values: List[float] = []
        statuses: List[str] = []
        failures: List[Tuple] = []

        for job_info in jobs:
            if time.time() - start_time > timeout:
                self.logger.error("Verification timeout exceeded")
                break

            # Read sample_idx before the try so the except handler can use it.
            sample_idx = job_info.get('sample_idx')
            try:
                job_id = job_info['job_id']
                result_root = job_info['result_root']

                self.logger.info(f"  Waiting for {label} sample {sample_idx}/{samples}: {job_id}")
                job_stats, job_health = self._poll_job_stats(job_id, result_root)

                if self.metric in job_stats:
                    value = float(job_stats[self.metric])
                    status = self._classify_perf_value(value, job_health)
                    values.append(value)
                    statuses.append(status)
                    self.logger.info(f"    Sample {sample_idx}: value={value:.2f}, status={status}")
                else:
                    failures.append((sample_idx, f"Metric {self.metric} not found"))
            except Exception as e:
                self.logger.error(f"  Failed to get result for sample {sample_idx}: {str(e)}")
                failures.append((sample_idx, str(e)))

        return values, statuses, failures

    def _analyze_perf_samples(self, values: List[float], statuses: List[str], failures: List[Tuple],
                              expected_status: str, min_required: int) -> Dict[str, Any]:
        """Summarize one commit's samples: stats, majority status, confidence, pass/fail."""
        if len(values) < min_required:
            # Not enough usable samples to draw a conclusion.
            return {
                'error': f'Insufficient samples: {len(values)}/{min_required}',
                'failed_samples': failures,
                'verification_passed': False
            }

        # Majority vote over per-sample statuses.
        status_counts: Dict[str, int] = {}
        for status in statuses:
            status_counts[status] = status_counts.get(status, 0) + 1
        final_status = max(status_counts.items(), key=lambda x: x[1])[0]
        confidence = status_counts.get(final_status, 0) / len(statuses)

        return {
            'samples': len(values),
            'values': values,
            'mean': statistics.mean(values),
            'stdev': statistics.stdev(values) if len(values) > 1 else None,
            'statuses': statuses,
            'final_status': final_status,
            'confidence': confidence,
            'expected_status': expected_status,
            'verification_passed': (final_status == expected_status and
                                    confidence >= BisectConfig.VERIFICATION_CONFIDENCE_LEVEL),
            'failed_samples': len(failures)
        }

    def _perform_boundary_verification(self, first_bad_commit: str, first_bad_id: str) -> Dict[str, Any]:
        """
        执行边界验证和HEAD检测

        Args:
            first_bad_commit: 找到的第一个坏提交
            first_bad_id: first_bad_commit对应的job_id

        Returns:
            验证结果字典，包含:
            - status: 'success' 或 'failed'
            - parent_job_id: parent commit的job_id
            - parent_commit: parent commit hash
            - introduced_errids: 引入的error id列表
            - head_job_id: HEAD commit的job_id (如果提交了)
            - head_status: HEAD检测状态
        """
        self.logger.info("=" * 80)
        self.logger.info("Starting boundary verification and HEAD check")
        self.logger.info(f"first_bad_commit: {first_bad_commit[:12]}")
        self.logger.info(f"first_bad_id: {first_bad_id}")

        result = {
            'status': 'pending',
            'first_bad_commit': first_bad_commit,
            'first_bad_id': first_bad_id
        }

        try:
            # 1. 获取 parent commit
            parent_commit = self._get_parent_commit_hash(first_bad_commit)
            if not parent_commit:
                self.logger.error("Failed to get parent commit")
                result['status'] = 'failed'
                result['error'] = 'cannot_get_parent_commit'
                return result

            result['parent_commit'] = parent_commit
            self.logger.info(f"parent_commit: {parent_commit[:12]}")

            # 2. 获取 HEAD commit (如果需要)
            head_commit = self._get_head_commit_hash()
            result['head_commit'] = head_commit
            self.logger.info(f"HEAD_commit: {head_commit[:12] if head_commit else 'N/A'}")

            # 3. 并行提交验证作业
            self.logger.info("Submitting verification jobs...")
            verification_jobs = self._submit_verification_jobs(
                parent_commit, head_commit, first_bad_commit
            )

            if not verification_jobs.get('parent_job_id'):
                self.logger.error("Failed to submit parent commit job")
                result['status'] = 'failed'
                result['error'] = 'parent_job_submission_failed'
                return result

            result['parent_job_id'] = verification_jobs['parent_job_id']
            result['head_job_id'] = verification_jobs.get('head_job_id')

            self.logger.info(f"parent_job_id: {verification_jobs['parent_job_id']}")
            if verification_jobs.get('head_job_id'):
                self.logger.info(f"head_job_id: {verification_jobs['head_job_id']}")

            # 4. 等待作业完成
            self.logger.info("Waiting for verification jobs to complete...")
            jobs_completed = self._wait_for_verification_jobs(verification_jobs)

            if not jobs_completed:
                self.logger.error("Verification jobs did not complete in time")
                result['status'] = 'failed'
                result['error'] = 'verification_timeout'
                return result

            # 5. 计算 errid diff (parent vs first_bad)
            self.logger.info("Calculating errid diff...")
            introduced_errids = self._calculate_errid_diff(
                verification_jobs['parent_job_id'],
                first_bad_id
            )

            result['introduced_errids'] = introduced_errids
            result['introduced_errids_count'] = len(introduced_errids)

            self.logger.info(f"Introduced {len(introduced_errids)} error IDs")

            # 5.5. Git-based verification (仅限构建任务)
            if self.is_build_task and introduced_errids:
                self.logger.info("Performing git-based file modification verification (build task only)...")
                self.logger.info(f"Note: Only checking current error_id to avoid large logs ({len(introduced_errids)} total introduced)")
                git_verification = self._verify_errids_with_git(
                    parent_commit,
                    first_bad_commit,
                    introduced_errids
                )
                result['git_verification'] = git_verification
                self.logger.info(f"Git verification: verified={git_verification['verified']}, "
                               f"confidence={git_verification['confidence']}, "
                               f"need_human_judgment={git_verification.get('need_human_judgment', False)}")
            else:
                if not self.is_build_task:
                    self.logger.info("Skipping git verification (not a build task)")
                result['git_verification'] = {
                    'verified': True,
                    'confidence': 1.0,
                    'reason': 'not_applicable',
                    'need_human_judgment': False
                }

            # 6. 检查 HEAD 状态 (如果提交了HEAD job)
            # 注意：HEAD 检测只是附加信息，不影响 bisect 验证结果
            if verification_jobs.get('head_job_id'):
                head_result = self._check_head_regression(
                    verification_jobs['head_job_id'],
                    introduced_errids
                )
                result['head_check'] = head_result
                self.logger.info(f"HEAD check (informational only): {head_result.get('status', 'unknown')}")

            # 7. 整合验证结果
            git_ver = result.get('git_verification', {})
            result['status'] = 'success'
            result['verification_passed'] = git_ver.get('verified', True)
            result['verification_confidence'] = git_ver.get('confidence', 1.0)
            result['verification_reason'] = git_ver.get('reason', 'boundary_check_passed')

            self.logger.info("=" * 80)
            self.logger.info("Boundary verification COMPLETED")
            self.logger.info(f"Introduced {len(introduced_errids)} error IDs")
            self.logger.info(f"Verification: {'PASSED' if result['verification_passed'] else 'FAILED'}")
            self.logger.info(f"Confidence: {result['verification_confidence']}")
            if git_ver.get('need_human_judgment'):
                self.logger.warning("HUMAN JUDGMENT RECOMMENDED")
            self.logger.info("=" * 80)

            return result

        except Exception as e:
            self.logger.error(f"Boundary verification exception: {str(e)}")
            self.logger.error(traceback.format_exc())
            result['status'] = 'error'
            result['error'] = str(e)
            return result

    def _get_parent_commit_hash(self, commit: str) -> Optional[str]:
        """获取提交的父提交"""
        try:
            result = subprocess.run(
                ['git', '-C', self.work_dir, 'rev-parse', f'{commit}^1'],
                capture_output=True,
                text=True,
                check=True,
                timeout=30
            )
            parent = result.stdout.strip()
            return parent if parent else None
        except Exception as e:
            self.logger.error(f"Failed to get parent commit: {str(e)}")
            return None

    def _get_head_commit_hash(self) -> Optional[str]:
        """获取HEAD提交"""
        try:
            result = subprocess.run(
                ['git', '-C', self.work_dir, 'rev-parse', 'HEAD'],
                capture_output=True,
                text=True,
                check=True,
                timeout=30
            )
            head = result.stdout.strip()
            return head if head else None
        except Exception as e:
            self.logger.error(f"Failed to get HEAD commit: {str(e)}")
            return None

    def _submit_verification_jobs(self, parent_commit: str, head_commit: Optional[str],
                                  first_bad_commit: str) -> Dict[str, str]:
        """
        提交验证作业（parent和可选的HEAD）

        Returns:
            包含job_id的字典
        """
        jobs = {}

        try:
            # 提交 parent commit 作业
            self.logger.info(f"Submitting parent commit job: {parent_commit[:12]}")
            parent_job = self.init_job_content(self.bad_job_id)

            # 修改commit字段为parent commit
            if 'ss' in parent_job and 'linux' in parent_job['ss']:
                parent_job['ss']['linux']['commit'] = parent_commit
            elif 'program' in parent_job and 'makepkg' in parent_job['program']:
                parent_job['program']['makepkg']['commit'] = parent_commit
            else:
                self.logger.warning("Unrecognized job structure, attempting generic commit replacement")

            parent_job_id, parent_result_root = self.submit_job(parent_job, force=True)
            jobs['parent_job_id'] = parent_job_id
            jobs['parent_result_root'] = parent_result_root

            # 提交 HEAD commit 作业（可选）
            if head_commit and head_commit != first_bad_commit:
                self.logger.info(f"Submitting HEAD commit job: {head_commit[:12]}")
                head_job = self.init_job_content(self.bad_job_id)

                if 'ss' in head_job and 'linux' in head_job['ss']:
                    head_job['ss']['linux']['commit'] = head_commit
                elif 'program' in head_job and 'makepkg' in head_job['program']:
                    head_job['program']['makepkg']['commit'] = head_commit

                head_job_id, head_result_root = self.submit_job(head_job, force=True)
                jobs['head_job_id'] = head_job_id
                jobs['head_result_root'] = head_result_root

            return jobs

        except Exception as e:
            self.logger.error(f"Failed to submit verification jobs: {str(e)}")
            self.logger.error(traceback.format_exc())
            return jobs

    def _wait_for_verification_jobs(self, verification_jobs: Dict[str, str],
                                    timeout: int = None) -> bool:
        """
        等待验证作业完成

        Args:
            verification_jobs: 包含job_id的字典
            timeout: 超时时间（秒），默认使用配置

        Returns:
            是否所有作业都成功完成
        """
        if timeout is None:
            timeout = BisectConfig.VERIFICATION_TIMEOUT

        start_time = time.time()
        check_interval = 30  # 30秒检查一次

        parent_job_id = verification_jobs.get('parent_job_id')
        head_job_id = verification_jobs.get('head_job_id')

        parent_completed = False
        head_completed = True if not head_job_id else False

        self.logger.info(f"Waiting for verification jobs (timeout: {timeout}s)")

        while time.time() - start_time < timeout:
            # 检查 parent job
            if not parent_completed and parent_job_id:
                try:
                    stats, health = self._poll_job_stats(
                        parent_job_id,
                        verification_jobs.get('parent_result_root')
                    )
                    if stats and isinstance(stats, dict):
                        parent_completed = True
                        self.logger.info(f"Parent job completed: {parent_job_id}")
                except Exception as e:
                    self.logger.debug(f"Parent job still running: {str(e)}")

            # 检查 HEAD job
            if not head_completed and head_job_id:
                try:
                    stats, health = self._poll_job_stats(
                        head_job_id,
                        verification_jobs.get('head_result_root')
                    )
                    if stats and isinstance(stats, dict):
                        head_completed = True
                        self.logger.info(f"HEAD job completed: {head_job_id}")
                except Exception as e:
                    self.logger.debug(f"HEAD job still running: {str(e)}")

            # 检查是否都完成了
            if parent_completed and head_completed:
                self.logger.info("All verification jobs completed")
                return True

            # 等待后再检查
            time.sleep(check_interval)

        self.logger.error(f"Verification jobs timeout after {timeout}s")
        return False

    def _calculate_errid_diff(self, parent_job_id: str, bad_job_id: str) -> List[str]:
        """
        计算errid差异

        Args:
            parent_job_id: parent commit的job_id
            bad_job_id: bad commit的job_id

        Returns:
            引入的errid列表
        """
        try:
            # 获取parent job的errids
            parent_stats, _ = self._poll_job_stats(parent_job_id)
            parent_errids = self._extract_errids_from_stats(parent_stats)

            # 获取bad job的errids
            bad_stats, _ = self._poll_job_stats(bad_job_id)
            bad_errids = self._extract_errids_from_stats(bad_stats)

            # 计算差集
            parent_set = set(parent_errids)
            bad_set = set(bad_errids)
            introduced = list(bad_set - parent_set)

            self.logger.info(f"Errid diff: parent={len(parent_errids)}, bad={len(bad_errids)}, introduced={len(introduced)}")

            return introduced

        except Exception as e:
            self.logger.error(f"Failed to calculate errid diff: {str(e)}")
            return []

    def _extract_errids_from_stats(self, stats: Dict) -> List[str]:
        """
        从stats中提取所有errids

        支持多种 errid 存储方式：
        1. stats['errid'] - 标准字段（列表或字符串）
        2. stats 中以特定模式命名的 key（如 makepkg.eid.*）

        注意：过滤掉以 .msg 结尾的字段，这些是错误消息而不是错误ID
        """
        errids = []

        if not stats:
            return errids

        # 方式1: 从 errid 字段提取
        if 'errid' in stats:
            if isinstance(stats['errid'], list):
                errids.extend(stats['errid'])
            elif isinstance(stats['errid'], str):
                errids.append(stats['errid'])

        # 方式2: 从 stats keys 中查找 errid 模式
        # 常见模式: makepkg.eid.*, *.c:*, *.h:*, etc.
        errid_patterns = [
            'makepkg.eid.',  # makepkg build errors
            '.c:',           # C source file errors
            '.h:',           # Header file errors
            '.cpp:',         # C++ source file errors
            '.S:',           # Assembly file errors
        ]

        for key in stats.keys():
            # 跳过以 .msg 结尾的字段（错误消息，不是错误ID）
            if key.endswith('.msg'):
                continue

            # Check if key matches errid patterns
            if any(pattern in key for pattern in errid_patterns):
                # Verify it's actually an error/warning field (not a metric)
                value = stats[key]
                # Errids typically have value 1 or are string descriptions
                # Performance metrics are usually floats or large numbers
                if isinstance(value, str) or (isinstance(value, (int, float)) and value == 1):
                    errids.append(key)

        return list(set(errids))  # 去重

    def _check_head_regression(self, head_job_id: str, introduced_errids: List[str]) -> Dict:
        """
        检查HEAD是否存在回归（仅供参考，不影响 bisect 验证结果）

        Args:
            head_job_id: HEAD commit的job_id
            introduced_errids: first_bad_commit引入的errids

        Returns:
            HEAD检测结果字典（informational only）
        """
        try:
            head_stats, _ = self._poll_job_stats(head_job_id)
            head_errids = self._extract_errids_from_stats(head_stats)

            # 检查introduced_errids是否在HEAD中仍存在
            head_set = set(head_errids)
            regressed_errids = [eid for eid in introduced_errids if eid in head_set]

            if regressed_errids:
                status = 'regressed'
                self.logger.info(f"HEAD status (info): Error still present in HEAD - {len(regressed_errids)} errors found")
                self.logger.info(f"This indicates the issue has not been fixed yet in the current branch")
            else:
                status = 'fixed'
                self.logger.info(f"HEAD status (info): Error has been fixed in HEAD - all introduced errors are gone")

            return {
                'status': status,
                'head_job_id': head_job_id,
                'regressed_errids': regressed_errids,
                'regressed_count': len(regressed_errids)
            }

        except Exception as e:
            self.logger.error(f"Failed to check HEAD regression: {str(e)}")
            return {
                'status': 'error',
                'error': str(e)
            }

    def _extract_file_path_from_errid(self, errid: str) -> Optional[str]:
        """
        从 errid 中提取文件路径
        复用 _was_file_compiled 中的逻辑
        
        Args:
            errid: 错误ID，例如 "makepkg.eid.drivers/net/ethernet/.../file.c:warning:..."
        
        Returns:
            文件路径，如果无法提取则返回 None
        """
        file_path = None
        
        # Handle 'makepkg.eid.' format
        if errid.startswith('makepkg.eid.'):
            # Regex to capture file path ending in .c, .h, .S, .s from makepkg errids
            match = re.search(r'makepkg\.eid\.(.*?\.(c|h|S|s)):', errid)
            if match:
                file_path = match.group(1)
        else:
            # Handle standard "file:line" errid format
            match = re.match(r'([^:]+):\d+', errid)
            if match:
                file_path = match.group(1)
        
        if file_path:
            self.logger.debug(f"Extracted file path from errid: '{file_path}'")
        else:
            self.logger.warning(f"Could not parse file path from errid: {errid}")
        
        return file_path

    def _check_file_modified_in_commit(self, commit: str, file_path: str) -> Optional[bool]:
        """
        检查文件是否在指定提交中被修改
        
        使用 git diff-tree 检查单个提交，这是最精确的方法
        
        Args:
            commit: 提交哈希
            file_path: 文件路径
        
        Returns:
            True 如果文件被修改，False 如果未被修改，None 如果检查失败
        """
        try:
            result = subprocess.run(
                ['git', '-C', self.work_dir, 'diff-tree', 
                 '--no-commit-id', '--name-only', '-r', commit],
                capture_output=True,
                text=True,
                check=True,
                timeout=30
            )
            modified_files = result.stdout.strip().split('\n')
            is_modified = file_path in modified_files
            
            self.logger.debug(f"File '{file_path}' {'was' if is_modified else 'was not'} modified in commit {commit[:8]}")
            return is_modified
            
        except Exception as e:
            self.logger.error(f"Failed to check file modification in commit: {str(e)}")
            return None

    def _check_file_modified_in_range(self, parent_commit: str, first_bad_commit: str, 
                                      file_path: str) -> Tuple[Optional[bool], List[str]]:
        """
        检查文件是否在提交范围内被修改
        
        使用 git rev-list 获取所有修改该文件的提交，这是最快的方法
        
        Args:
            parent_commit: 父提交
            first_bad_commit: first_bad 提交
            file_path: 文件路径
        
        Returns:
            (是否被修改, 相关提交列表)
        """
        try:
            result = subprocess.run(
                ['git', '-C', self.work_dir, 'rev-list', 
                 f'{parent_commit}..{first_bad_commit}', '--', file_path],
                capture_output=True,
                text=True,
                check=True,
                timeout=30
            )
            commits = [c for c in result.stdout.strip().split('\n') if c]
            is_modified = len(commits) > 0
            
            if commits:
                self.logger.debug(f"File '{file_path}' was modified in {len(commits)} commits in range")
            else:
                self.logger.debug(f"File '{file_path}' was NOT modified in range")
            
            return is_modified, commits
            
        except Exception as e:
            self.logger.error(f"Failed to check file modification in range: {str(e)}")
            return None, []

    def _verify_errids_with_git(self, parent_commit: str, first_bad_commit: str,
                                introduced_errids: List[str]) -> Dict[str, Any]:
        """
        Use git to verify that the file tied to the current error_id was
        really modified between parent and first_bad.

        Build tasks only: only build errors carry an explicit file path.
        To keep logs small, only the error_id currently being bisected is
        checked, not every introduced errid.

        Preconditions (guaranteed by _calculate_errid_diff):
        - first_bad_commit contains these errids
        - parent does not contain these errids

        Verification logic:
        1. if (first_bad has it) AND (parent lacks it) AND (git shows the
           file modified):
           => bisect success (confidence: 1.0)
        2. elif (first_bad has it) AND (parent lacks it) BUT (file NOT
           modified):
           => bisect success but needs human judgment (confidence: 0.5)
        3. anything else => bisect failed

        Args:
            parent_commit: parent commit
            first_bad_commit: first_bad commit
            introduced_errids: introduced errids (confirmed present in
                first_bad, absent from parent)

        Returns:
            {
                'verified': bool,              # did verification pass
                'confidence': float,           # confidence 0.0-1.0
                'reason': str,                 # explanation
                'need_human_judgment': bool,   # human review recommended?
                'file_analysis': [...],        # per-file detail
                'stats': {...}                 # statistics
            }
        """
        self.logger.info("=" * 80)
        self.logger.info("Starting git-based errid verification (build task only)")

        # Check only the error_id currently being bisected to avoid huge logs;
        # fall back to the first introduced errid when self.error_id is unset.
        target_errid = self.error_id if self.error_id else (introduced_errids[0] if introduced_errids else None)

        if not target_errid:
            self.logger.warning("No target error_id to verify")
            return {
                'verified': True,
                'confidence': 0.5,
                'reason': 'no_target_error_id',
                'need_human_judgment': True,
                'file_analysis': [],
                'stats': {
                    'total_errids': 0,
                    'valid_errids': 0,
                    'files_not_extracted': 0
                }
            }

        self.logger.info(f"Checking only current error_id: {target_errid}")
        self.logger.info(f"(Skipping {len(introduced_errids) - 1} other introduced errids to reduce log size)")

        file_analysis = []
        files_modified_in_first_bad = 0
        files_modified_in_range = 0
        files_not_extracted = 0
        unique_files = set()

        # Process only the target errid
        errid = target_errid
        file_path = self._extract_file_path_from_errid(errid)

        if not file_path:
            files_not_extracted = 1
            file_analysis.append({
                'errid': errid,
                'file_path': None,
                'extraction_failed': True
            })
        else:
            unique_files.add(file_path)

            # Check the first_bad commit itself
            modified_in_first_bad = self._check_file_modified_in_commit(first_bad_commit, file_path)

            # Check the whole (parent, first_bad] range
            modified_in_range, relevant_commits = self._check_file_modified_in_range(
                parent_commit, first_bad_commit, file_path
            )

            if modified_in_first_bad:
                files_modified_in_first_bad = 1
            if modified_in_range:
                files_modified_in_range = 1

            file_analysis.append({
                'errid': errid,
                'file_path': file_path,
                'modified_in_first_bad': modified_in_first_bad,
                'modified_in_range': modified_in_range,
                'relevant_commits': relevant_commits[:3] if relevant_commits else []  # keep only the first 3
            })

        # Decision logic (now based on this single error_id only)
        total_errids = 1  # only one error_id is checked
        valid_errids = 1 - files_not_extracted
        unique_file_count = len(unique_files)

        self.logger.info(f"Extracted {valid_errids}/{total_errids} file paths, {unique_file_count} unique files")
        self.logger.info(f"Files modified in first_bad: {files_modified_in_first_bad}")
        self.logger.info(f"Files modified in range: {files_modified_in_range}")

        # No file path could be extracted - skip git verification entirely
        if valid_errids == 0:
            result = {
                'verified': True,  # cannot verify, but the bisect result stands
                'confidence': 0.5,
                'reason': 'cannot_extract_file_path_skip_git_verification',
                'need_human_judgment': True,
                'file_analysis': file_analysis,
                'stats': {
                    'total_errids': total_errids,
                    'valid_errids': 0,
                    'files_not_extracted': files_not_extracted,
                    'target_errid': target_errid
                }
            }
            self.logger.warning(f"Cannot extract file path from error_id '{target_errid}', skipping git verification")
            return result

        # Simple test: was the file modified anywhere in the range
        files_modified = (files_modified_in_range > 0)

        # Precondition: first_bad has the errid (True), parent lacks it
        # (True, since it comes from introduced_errids)

        # Case 1: first_bad has it AND parent lacks it AND the file changed
        if files_modified:
            verified = True
            confidence = 1.0
            reason = 'file_modified_bisect_success'
            need_human_judgment = False
            self.logger.info(f"Verification PASSED: File '{file_path}' was modified (confidence: {confidence})")

        # Case 2: first_bad has it AND parent lacks it BUT file unchanged
        else:
            verified = True  # bisect itself still found first_bad successfully
            confidence = 0.5
            reason = 'file_not_modified_likely_environmental_need_human_judgment'
            need_human_judgment = True
            self.logger.warning(f"Verification PASSED (environmental): File '{file_path}' not modified (confidence: {confidence}), likely compiler/config issue, need human judgment")

        result = {
            'verified': verified,
            'confidence': confidence,
            'reason': reason,
            'need_human_judgment': need_human_judgment,
            'file_analysis': file_analysis,
            'stats': {
                'target_errid': target_errid,
                'total_introduced_errids': len(introduced_errids),
                'checked_errids': 1,  # only one was checked
                'valid_errids': valid_errids,
                'unique_files': unique_file_count,
                'files_modified_in_first_bad': files_modified_in_first_bad,
                'files_modified_in_range': files_modified_in_range,
                'files_not_extracted': files_not_extracted,
                'file_modified': files_modified
            }
        }

        self.logger.info("=" * 80)
        return result

    def _verify_skip_candidates(self, candidates: List[str], max_verify: int = None) -> Optional[str]:
        """
        验证skip结果的候选提交，尝试找出真正的first bad commit

        Args:
            candidates: 候选提交列表
            max_verify: 最多验证的候选数量

        Returns:
            找到的第一个坏提交，如果无法确定则返回None
        """
        if max_verify is None:
            max_verify = min(len(candidates), BisectConfig.SKIP_RESULT_MAX_CANDIDATES)

        self.logger.info(f"Verifying {max_verify} skip candidates out of {len(candidates)}")

        verified_bad = None

        for i, commit in enumerate(candidates[:max_verify]):
            self.logger.info(f"Testing candidate {i+1}/{max_verify}: {commit[:8]}")

            # 测试这个候选提交
            commit_status = self.get_commit_status_by_job(commit)

            if commit_status == 'bad':
                self.logger.info(f"Found bad commit: {commit[:8]}")

                # 检查其父提交是否是good
                parent = self._get_parent_commit(commit)
                if parent:
                    parent_status = self.get_commit_status_by_job(parent)
                    if parent_status == 'good':
                        self.logger.info(f"Confirmed: {commit[:8]} is the first bad commit")
                        verified_bad = commit
                        break
                    elif parent_status == 'bad':
                        self.logger.info(f"Parent {parent[:8]} is also bad, continuing search")
                    else:
                        self.logger.warning(f"Parent {parent[:8]} status is {parent_status}")

            elif commit_status == 'good':
                self.logger.info(f"Commit {commit[:8]} is good, skipping")
            else:
                self.logger.warning(f"Commit {commit[:8]} status is {commit_status}")

        return verified_bad

    def analyse_result(self, result):
        """
        Analyze bisect results and compile log data.
        Args:
            result (str): Raw output from bisect operation
        Returns:
            dict: Structured bisect log data
        """
        if not result.strip():
            raise RuntimeError("Empty bisect result - no output from git bisect")

        # 检查是否是skip导致的多候选结果
        skip_pattern = r"There are only 'skip'ped commits left to test\.\s*The first bad commit could be any of:\s*((?:[a-f0-9]{40}\s*)+)"
        skip_match = re.search(skip_pattern, result, re.MULTILINE | re.DOTALL)

        if skip_match:
            # skip结果：提取所有候选commit
            candidates_text = skip_match.group(1).strip()
            candidate_commits = re.findall(r'([a-f0-9]{40})', candidates_text)

            self.logger.info(f"Skip result: found {len(candidate_commits)} candidate bad commits")

            # Try to verify candidate commits to find the real first bad commit
            verified_bad = self._verify_skip_candidates(candidate_commits)

            if verified_bad:
                self.logger.info(f"Successfully verified {verified_bad[:8]} as the first bad commit")
                first_bad_commit = verified_bad
                # 构建标准结果（已验证）
                return self._build_standard_result(first_bad_commit, skip_verified=True)
            else:
                # 无法确定，使用第一个候选
                if candidate_commits:
                    first_bad_commit = candidate_commits[0]
                    self.logger.info(f"Cannot verify candidates, using first candidate: {first_bad_commit[:8]}")
                    return self._build_skip_result(first_bad_commit, candidate_commits, result)
                else:
                    raise RuntimeError("No valid candidate commits found in skip result")

        # 标准bisect结果：查找唯一的first bad commit
        match = re.search(r"\b([a-f0-9]{40})\b.*is the first bad commit", result)
        if not match:
            raise RuntimeError(
                f"Failed to identify bad commit in bisect output\n"
                f"First 200 characters:\n{result[:200]}"
            )

        first_bad_commit = match.group(1)
        self.logger.info("Successfully parsed first bad commit: %s", first_bad_commit)

        # Build standard result data
        return self._build_standard_result(first_bad_commit)

    def _normalize_commit_hash(self, commit_ref: str) -> Optional[str]:
        """
        将 commit 引用（tag、短 hash、完整 hash）规范化为完整的 commit hash

        Args:
            commit_ref: commit 引用（可以是 tag 如 v6.6-rc1，短 hash，或完整 hash）

        Returns:
            完整的 40 位 commit hash，失败返回 None
        """
        try:
            result = subprocess.run(
                ['git', '-C', self.work_dir, 'rev-parse', commit_ref],
                capture_output=True,
                text=True,
                check=True,
                timeout=30
            )
            normalized = result.stdout.strip()
            if normalized and len(normalized) == 40:
                self.logger.debug(f"Normalized '{commit_ref}' to '{normalized}'")
                return normalized
            else:
                self.logger.warning(f"Invalid commit hash length for '{commit_ref}': {len(normalized)}")
                return None
        except subprocess.CalledProcessError as e:
            self.logger.error(f"Failed to normalize commit reference '{commit_ref}': {e.stderr}")
            return None
        except Exception as e:
            self.logger.error(f"Unexpected error normalizing '{commit_ref}': {str(e)}")
            return None

    def _get_change_description(self) -> str:
        """

        Returns:
            描述bisect发现的变化的字符串
        """
        if self.error_id:
            return f"First commit introducing error '{self.error_id}'"

        elif self.metric:
            if self.direction == -1:
                # direction=-1: 数值上升表示性能变差
                trend = "increased"
                impact = "regression"
            else:
                # direction=1: 数值下降表示性能变差
                trend = "decreased"
                impact = "regression"

            return f"First commit where '{self.metric}' {trend} significantly (performance {impact})"

        return "First commit introducing the observed change"

    def _build_skip_result(self, first_bad_commit, candidate_commits, result):
        """构建skip结果的数据结构"""
        first_bad_id, bad_result_root = self.get_id_and_result_root_by_commit(first_bad_commit)
        if not first_bad_id:
            self.logger.warning("Skip result: first_bad_commit %s was not actually tested", first_bad_commit)
            # For skip results, this is normal and should not raise exception

        return {
            # 技术字段（git bisect术语）
            'first_bad_commit': first_bad_commit,
            'first_bad_id': first_bad_id,
            'bad_result_root': bad_result_root,
            'bisect_type': 'skip',  # 标记为skip类型结果

            'change_point': first_bad_commit,
            'change_description': self._get_change_description(),
            'bisect_range': {
                'start_commit': self.good_commit,
                'end_commit': self.bad_commit
            },

            # Skip特有字段
            'candidate_commits': candidate_commits,  # 保存所有候选commit
            'candidate_count': len(candidate_commits),

            # 时间和输出
            'start_time': self.bisect_start_time,
            'end_time': self.bisect_end_time,
            'bisect_output': self._format_bisect_output(result)
        }

        # Generate and save visualization for skip results
        self._generate_bisect_visualization()

        return skip_result

    def _build_standard_result(self, first_bad_commit, skip_verified=False):
        """
        构建标准bisect结果的数据结构

        Args:
            first_bad_commit: 找到的第一个坏提交
            skip_verified: 是否是通过验证skip候选得到的结果
        """
        # 规范化 bad_commit 为完整的 commit hash，处理 tag 或短 hash 的情况
        normalized_bad_commit = self._normalize_commit_hash(self.bad_commit)

        # 如果 first_bad_commit 就是原始的 bad_commit，直接使用 bad_job_id
        # 先尝试规范化的比较，如果规范化失败则使用原始字符串比较
        is_same_commit = False
        if normalized_bad_commit and first_bad_commit == normalized_bad_commit:
            is_same_commit = True
            self.logger.info(f"first_bad_commit matches normalized bad_commit (resolved tag/hash)")
        elif first_bad_commit == self.bad_commit:
            is_same_commit = True
            self.logger.info(f"first_bad_commit matches original bad_commit (direct string match)")

        if is_same_commit:
            first_bad_id = self.bad_job_id
            # 从数据库获取 result_root
            try:
                job_info = self.bisect_db.get_job_info(self.bad_job_id)
                bad_result_root = job_info.get('result_root') if job_info else None
            except Exception as e:
                self.logger.warning(f"Failed to get result_root for bad_job_id: {e}")
                bad_result_root = None
            self.logger.info(f"Using bad_job_id: {first_bad_id}")
        else:
            first_bad_id, bad_result_root = self.get_id_and_result_root_by_commit(first_bad_commit)
            if not first_bad_id:
                self.logger.error("first_bad_commit %s was not actually tested, bisect result invalid", first_bad_commit)
                raise RuntimeError("Bisect result verification failed: first_bad_commit not tested")

        all_bisect_log = {
            # 技术字段（git bisect术语，保持兼容性）
            'first_bad_commit': first_bad_commit,
            'first_bad_id': first_bad_id,
            'bad_result_root': bad_result_root,
            'bisect_type': 'skip_verified' if skip_verified else 'standard',  # 标记结果类型

            'change_point': first_bad_commit,
            'change_description': self._get_change_description(),
            'bisect_range': {
                'start_commit': self.good_commit,
                'end_commit': self.bad_commit
            },

            # 时间和输出
            'start_time': self.bisect_start_time,
            'end_time': self.bisect_end_time
        }

        # 执行结果验证（如果启用）
        if BisectConfig.ENABLE_RESULT_VERIFICATION and not skip_verified:
            verification_result = self._verify_bisect_result(first_bad_commit)
            all_bisect_log['verification'] = verification_result

            if not verification_result.get('verified', False):
                self.logger.warning("WARNING: Bisect result verification failed")
                self.logger.warning(f"Reason: {verification_result.get('reason', 'unknown')}")
                # 根据配置决定是否继续
                if os.environ.get('BISECT_STRICT_VERIFICATION', '').lower() in ('1', 'true', 'yes'):
                    raise RuntimeError(f"Bisect verification failed: {verification_result.get('reason')}")
        elif skip_verified:
            # 如果是通过验证skip候选得到的，标记为已验证
            all_bisect_log['verification'] = {
                'verified': True,
                'reason': 'skip_candidates_verified',
                'confidence': 1.0
            }

        # 边界验证和HEAD检测（仅在非性能bisect时执行）
        if not self.metric and BisectConfig.ENABLE_BOUNDARY_VERIFICATION:
            try:
                self.logger.info("开始边界验证和HEAD检测")
                boundary_result = self._perform_boundary_verification(first_bad_commit, first_bad_id)
                all_bisect_log['boundary_verification'] = boundary_result

                if boundary_result.get('status') == 'success':
                    self.logger.info(f"边界验证成功 | introduced_errids: {len(boundary_result.get('introduced_errids', []))}")
                else:
                    self.logger.warning(f"边界验证失败 | reason: {boundary_result.get('error', 'unknown')}")
            except Exception as e:
                self.logger.error(f"边界验证异常: {str(e)}")
                self.logger.error(traceback.format_exc())
                all_bisect_log['boundary_verification'] = {
                    'status': 'error',
                    'error': str(e)
                }

        # Generate and save visualization
        self._generate_bisect_visualization()

        self.analysis_complete = True
        return all_bisect_log
        
    def _format_bisect_output(self, raw_output):
        """提取bisect输出中的最终结果部分"""
        if not raw_output:
            return ""

        # 清理转义字符
        formatted = raw_output.replace("\\'", "'").replace('\\"', '"')
        lines = formatted.split('\n')

        # 查找 "is the first bad commit" 所在行的索引
        result_index = -1
        for i, line in enumerate(lines):
            if ' is the first bad commit' in line:
                result_index = i
                break

        # 如果找到了结果行,提取从该行开始到结尾的所有内容
        if result_index >= 0:
            result_lines = lines[result_index:]
            result = '\n'.join(line.strip() for line in result_lines if line.strip())
        else:
            # 如果没有找到结果,返回空字符串
            result = ""

        # 确保结果不包含有问题的字符
        result = result.replace('\0', '').replace('\r', '')

        return result

    def get_id_and_result_root_by_commit(self, commit: str) -> Tuple[Optional[str], Optional[str]]:
        """Look up the job_id and result_root recorded for a commit.

        Scans the commit_jobs.jsonl record file in the temp result directory.
        When a commit was tested more than once, the LAST matching record wins
        (the scan deliberately does not break on the first hit).

        Args:
            commit: The commit hash to look up.

        Returns:
            (job_id, result_root) on success; (None, None) when the record
            file is missing, unreadable, or contains no complete match.
        """
        # NOTE(review): self.logger is called with extra keyword arguments
        # (structured-logging style); this assumes the custom logger from
        # bisect_log_config supports kwargs — stdlib logging would not.
        commit_jobs_path = os.path.join(self.temp_result_root, 'commit_jobs.jsonl')
        
        if not os.path.exists(commit_jobs_path):
            self.logger.warning("Commit jobs record not found", path=commit_jobs_path)
            return None, None

        latest_job_id = None
        latest_result_root = None
        
        try:
            with open(commit_jobs_path, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        record = json.loads(line.strip())
                        if record.get('commit') == commit:
                            # Keep overwriting so the last (most recent) match wins.
                            latest_job_id = record.get('job_id')
                            latest_result_root = record.get('job_result_root')
                            self.logger.debug("Found matching commit record",
                                       commit=commit,
                                       job_id=latest_job_id)
                    except json.JSONDecodeError as e:
                        # Tolerate corrupt lines; log and keep scanning.
                        self.logger.warning("Invalid JSON line in commit jobs file",
                                     error=str(e),
                                     line=line)
                        continue

            if latest_job_id and latest_result_root:
                self.logger.info("Resolved commit to job details",
                          commit=commit[:7],
                          job_id=latest_job_id,
                          result_root=latest_result_root)
                return latest_job_id, latest_result_root

            self.logger.warning("No matching commit found in records", commit=commit)
            return None, None

        except IOError as e:
            self.logger.error("Failed to read commit jobs file",
                       path=commit_jobs_path,
                       error=str(e))
            return None, None
    def _generate_and_save_visualizations(self, bisect_result: dict) -> Optional[str]:
        """
        Generate and save all visualizations after successful bisect

        Args:
            bisect_result: The bisect result dictionary

        Returns:
            Path to visualization file if successful, None otherwise
        """
        try:
            if not self.temp_result_root or not os.path.exists(self.temp_result_root):
                self.logger.warning("Result directory not available for visualization")
                return None

            # Store first_bad_commit for visualization
            self.first_bad_commit = bisect_result.get('first_bad_commit')

            # Generate visualization
            viz_content = self._generate_bisect_visualization()

            if viz_content:
                # Save to multiple formats for accessibility

                # 1. Save as text file
                viz_txt_path = os.path.join(self.temp_result_root, 'bisect_visualization.txt')
                with open(viz_txt_path, 'w', encoding='utf-8') as f:
                    f.write(viz_content)

                # 2. Save summary in JSON format
                viz_json_path = os.path.join(self.temp_result_root, 'bisect_summary.json')
                summary = {
                    'bisect_type': bisect_result.get('bisect_type', 'unknown'),
                    'change_point': bisect_result.get('change_point'),
                    'change_description': bisect_result.get('change_description'),
                    'start_commit': self.good_commit,
                    'end_commit': self.bad_commit,
                    'first_bad_commit': self.first_bad_commit,
                    'metric': self.metric if self.metric else None,
                    'error_id': self.error_id if self.error_id else None,
                    'mid_point': self.mid_point if self.metric else None,
                    'direction': self.direction if self.metric else None,
                    'candidate_commits': bisect_result.get('candidate_commits', []),
                    'start_time': bisect_result.get('start_time'),
                    'end_time': bisect_result.get('end_time'),
                    'visualization_generated': True
                }

                with open(viz_json_path, 'w', encoding='utf-8') as f:
                    json.dump(summary, f, indent=2, ensure_ascii=False)

                self.logger.info(f"Visualization files created successfully")
                self.logger.info(f"  Text visualization: {viz_txt_path}")
                self.logger.info(f"  JSON summary: {viz_json_path}")

                # Print to console if verbose mode
                if os.environ.get('BISECT_SHOW_VISUALIZATION', '').lower() in ('1', 'true', 'yes'):
                    print("\n" + "=" * 120)
                    print("BISECT RESULT VISUALIZATION")
                    print("=" * 120)
                    print(viz_content)
                    print("=" * 120 + "\n")

                return viz_txt_path

            return None

        except Exception as e:
            self.logger.error(f"Failed to generate visualizations: {str(e)}")
            # Don't fail the bisect process due to visualization error
            return None

    def _generate_bisect_visualization(self) -> Optional[str]:
        """Generate ASCII visualization of bisect results and return as string"""
        try:
            # Read commit jobs data
            commit_jobs_path = os.path.join(self.temp_result_root, 'commit_jobs.jsonl')
            if not os.path.exists(commit_jobs_path):
                self.logger.warning("Cannot generate visualization: commit_jobs.jsonl not found")
                return None

            commits_data = []
            with open(commit_jobs_path, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        record = json.loads(line.strip())
                        commits_data.append(record)
                    except json.JSONDecodeError:
                        continue

            if not commits_data:
                self.logger.warning("No commit data found for visualization")
                return None

            # Create visualizer
            visualizer = BisectVisualizer(width=120, height=20)

            # Build complete visualization content
            viz_parts = []

            # Generate appropriate visualization based on bisect type
            if self.metric:
                # Performance bisect visualization
                performance_data = []
                for record in commits_data:
                    # First try to use locally stored metric_value
                    metric_value = record.get('metric_value')

                    # If not available locally, query from database
                    if metric_value is None:
                        job_id = record.get('job_id')
                        if job_id:
                            job_info = self.bisect_db.get_job_info(job_id)
                            if job_info and 'stats' in job_info:
                                metric_value = job_info['stats'].get(self.metric)

                    if metric_value is not None:
                        # Recalculate status based on mid_point and direction
                        # to ensure consistency in visualization
                        if self.mid_point is not None and self.direction is not None:
                            if (float(metric_value) - self.mid_point) * self.direction > 0:
                                status = 'good'
                            else:
                                status = 'bad'
                        else:
                            # Fallback to recorded status if mid_point not available
                            status = record.get('status', 'unknown')

                        performance_data.append({
                            'commit': record['commit'][:7],
                            'full_commit': record['commit'],
                            'value': float(metric_value),
                            'status': status,
                            'job_id': record.get('job_id'),
                            'timestamp': record.get('timestamp')
                        })

                if performance_data:
                    visualization = visualizer.visualize_performance_bisect(
                        performance_data,
                        metric_name=self.metric,
                        mid_point=self.mid_point,
                        direction=self.direction or 1
                    )
                    viz_parts.append(visualization)

                    # Add detailed data table
                    viz_parts.append("\n" + "=" * 120 + "\n")
                    viz_parts.append("Detailed Performance Data\n")
                    viz_parts.append("=" * 120 + "\n")
                    viz_parts.append(f"{'Commit':<45} {'Job ID':<20} {'Value':>12} {'Status':<8} {'Timestamp'}\n")
                    viz_parts.append("-" * 120 + "\n")
                    for data in performance_data:
                        viz_parts.append(
                            f"{data['full_commit']:<45} "
                            f"{data.get('job_id', 'N/A'):<20} "
                            f"{data['value']:>12.2f} "
                            f"{data['status']:<8} "
                            f"{data.get('timestamp', 'N/A')}\n"
                        )
                else:
                    self.logger.warning("No performance data points for visualization")
                    return None
            else:
                # Functional bisect visualization
                functional_data = [
                    {
                        'commit': record['commit'][:7],
                        'status': record.get('status', 'unknown')
                    }
                    for record in commits_data
                ]

                visualization = visualizer.visualize_functional_bisect(functional_data)
                viz_parts.append(visualization)

            # Add git tree visualization if repo is available
            if hasattr(self, 'work_dir') and os.path.exists(os.path.join(self.work_dir, '.git')):
                viz_parts.append("\n" + "=" * 120 + "\n")

                # Prepare tested commits dict
                tested_dict = {record['commit']: record.get('status', 'unknown') for record in commits_data}

                # Generate git tree visualization
                tree_viz = visualizer.visualize_git_tree(
                    self.work_dir,
                    self.good_commit,
                    self.bad_commit,
                    tested_dict,
                    max_commits=100
                )
                viz_parts.append(tree_viz)

                # If we found a first bad commit, add merge analysis
                if hasattr(self, 'first_bad_commit') and self.first_bad_commit:
                    viz_parts.append("\n" + "=" * 120 + "\n")
                    merge_analysis = visualizer.visualize_merge_analysis(
                        self.work_dir,
                        self.first_bad_commit,
                        tested_dict
                    )
                    viz_parts.append(merge_analysis)

            # Add bisect progression summary
            viz_parts.append("\n" + "=" * 120 + "\n")
            all_commits = []
            if hasattr(self, 'work_dir') and os.path.exists(os.path.join(self.work_dir, '.git')):
                try:
                    cmd = ['git', '-C', self.work_dir, 'rev-list', f'{self.good_commit}..{self.bad_commit}']
                    result = subprocess.run(cmd, capture_output=True, text=True, check=True)
                    all_commits = result.stdout.strip().split('\n') if result.stdout.strip() else []
                except:
                    pass

            if all_commits:
                tested_commits = [
                    {'commit': record['commit'], 'status': record.get('status', 'unknown')}
                    for record in commits_data
                ]
                progress_viz = visualizer.visualize_bisect_progression(
                    tested_commits,
                    all_commits,
                    current_commit=None
                )
                viz_parts.append(progress_viz)

            return "\n".join(viz_parts)

        except Exception as e:
            self.logger.error(f"Failed to generate bisect visualization: {str(e)}")
            # Don't fail the bisect process due to visualization error
            return None

    def _filter_recent_jobs(self, job_entries: list, max_age_hours: int = 24) -> list:
        """Filter recent valid jobs with ISO datetime support"""
        valid = []
        cutoff = time.time() - max_age_hours * 3600

        for job_id, result_root in job_entries:
            try:
                job = self.bisect_db.get_job_info(job_id)
                if not job:
                    continue

                # Handle both datetime formats
                submit_time_str = job.get('submit_time')
                if not submit_time_str:
                    continue

                try:
                    # Try ISO format with timezone first
                    submit_time = datetime.fromisoformat(submit_time_str.replace('Z', '+00:00'))
                except ValueError:
                    # Fallback to simple format
                    submit_time = datetime.strptime(submit_time_str, '%Y-%m-%d %H:%M:%S')

                if submit_time.timestamp() > cutoff:
                    valid.append((job_id, result_root))

            except Exception as e:
                self.logger.warning(f"Skipping job {job_id}: Failed to parse timestamp - {str(e)}")
                continue

        return valid

# NOTE: Duplicate re-definitions of JobSubmissionError, JobStatusError,
# JobStatusTimeoutError and BisectError were removed from this spot. They
# shadowed the identical classes defined at the top of this module, which
# caused a real bug: RETRYABLE_ERRORS captured the top-of-file class objects
# at module load time, while `raise JobSubmissionError(...)` at runtime
# resolved to these later duplicates — so isinstance()-based retry matching
# silently never matched. The top-of-file definitions are now the single
# source of truth.

class GitOperationError(BisectError):
    """Base class for errors raised by git operations."""
    pass

class CloneError(GitOperationError):
    """Raised when cloning a repository keeps failing after retries."""
    def __init__(self, url, retries):
        message = f"Failed to clone {url} after {retries} retries"
        self.repo_url = url
        self.retry_count = retries
        super().__init__(message)

class CheckoutError(GitOperationError):
    """Raised when checking out a specific commit fails."""
    def __init__(self, commit, repo_path):
        message = f"Checkout failed for {commit} in {repo_path}"
        self.commit = commit
        self.repo_path = repo_path
        super().__init__(message)

class InvalidCommitError(GitOperationError):
    """Raised for an invalid git commit hash."""
    pass


