#!/usr/bin/env python3
# SPDX-License-Identifier: MulanPSL-2.0+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.

"""
Bisect 任务批量分析工具

本工具用于分析 bisect 任务的执行情况和质量指标。

Updated: 2025-11-05
- 简化成功率计算：直接判断 bisect_status='success' 即为成功
- 集成两步验证机制后，不再需要单独检查 verification_status
- 成功率计算基于 bisect_status='success' 的任务数
"""

import os
import sys
import csv
import json
import logging
from datetime import datetime, timedelta
from collections import defaultdict
import argparse

# Try to import tabulate for nice table formatting
try:
    from tabulate import tabulate
    HAS_TABULATE = True
except ImportError:
    HAS_TABULATE = False

# Make the bisect-py program modules importable
sys.path.append(os.path.join(os.environ['LKP_SRC'], 'programs/bisect-py'))

# Use the shared logging configuration
from bisect_log_config import logger
# Import the ManticoreClient HTTP client
from manticore_simple import ManticoreClient

class BisectAnalysis:
    """Bisect 任务批量分析工具"""
    
    def __init__(self, db_config=None):
        """Set up the Manticore HTTP client from config and/or environment.

        :param db_config: optional dict with 'host'/'port' overrides;
                          falls back to MANTICORE_HOST / MANTICORE_PORT.
        """
        cfg = db_config or {}
        host = cfg.get('host', os.getenv('MANTICORE_HOST', 'localhost'))
        port = int(cfg.get('port', os.getenv('MANTICORE_PORT', '9306')))
        # 9306 is the SQL protocol port; this tool talks HTTP, so switch over.
        if port == 9306:
            port = 9308

        self.client = ManticoreClient(host=host, port=port)
        self.jobs_db = os.getenv('MANTICORE_DB', 'jobs')
        # bisect tasks usually live in their own dedicated index
        self.bisect_db = 'bisect'
        logger.info(f"Manticore HTTP client initialized: {host}:{port}")

    def query_bisect_tasks(self, hours=24, status=None, limit=1000, include_failed=False):
        """
        Query bisect tasks (using HTTP API).

        :param hours: look-back window, matched against the updated_at field
        :param status: optional bisect_status filter
        :param include_failed: when True, restrict to failed tasks
                               (takes precedence over *status*)
        :param limit: maximum number of results to return
        :return: list of task dicts (possibly empty); each row gains raw
                 _start_timestamp/_end_timestamp plus formatted
                 start_time/end_time strings
        """
        try:
            since = int((datetime.now() - timedelta(hours=hours)).timestamp())

            # Filter on updated_at rather than submit_time: a task submitted
            # long ago may still have completed inside the window.
            conditions = [{"range": {"updated_at": {"gte": since}}}]
            if include_failed:
                conditions.append({"equals": {"bisect_status": "failed"}})
            elif status:
                conditions.append({"equals": {"bisect_status": status}})

            query = {"bool": {"must": conditions}}
            sort = [{"updated_at": {"order": "desc"}}]

            logger.debug(f"Executing Manticore query on '{self.bisect_db}' index: {json.dumps(query, indent=2)}")

            rows = self.client.search(
                index=self.bisect_db,
                query=query,
                limit=limit,
                sort=sort,
                options={'max_matches': limit}
            )
            if rows is None:
                logger.error("Query failed, client returned None")
                return []

            # Keep the raw epoch values and overlay human-readable strings.
            fmt = '%Y-%m-%d %H:%M:%S'
            for row in rows:
                start_ts = row.get('start_time')
                end_ts = row.get('end_time')
                row['_start_timestamp'] = start_ts
                row['_end_timestamp'] = end_ts
                row['start_time'] = datetime.fromtimestamp(start_ts).strftime(fmt) if start_ts else 'N/A'
                row['end_time'] = datetime.fromtimestamp(end_ts).strftime(fmt) if end_ts else ''

            return rows

        except Exception as err:
            logger.error(f"Query failed: {err}")
            return []

    def get_sample_tasks(self):
        """
        Get sample task data for demonstrating HEAD test check functionality.

        Returns seven hard-coded tasks covering every interesting shape:
        success with fixed/regress/unknown HEAD checks, failed with and
        without HEAD-check data, plus wait/processing placeholders.
        """
        import time
        now = int(time.time())

        def head_check(status, at_ago, commit, job_id, regressed):
            # Nested 'j' payload carrying HEAD-check results.
            return {
                'head_check_status': status,
                'head_check_at': now - at_ago,
                'head_check_commit': commit,
                'head_check_job_id': job_id,
                'regressed_errids': regressed
            }

        def sample(num, status, commit, error_id, result_root,
                   submit_ago, start_ago, end_ago, j, last_error=None):
            # One task record; an *_ago of 0 means "never started/finished".
            record = {
                'id': f'bisect_{num:03d}',
                'bad_job_id': f'2511051716080000{num}',
                'bisect_status': status,
                'first_bad_commit': commit,
                'error_id': error_id,
                'bisect_result_root': result_root,
                'submit_time': now - submit_ago,
                'start_time': now - start_ago if start_ago else 0,
                'end_time': now - end_ago if end_ago else 0,
                'j': j
            }
            if last_error is not None:
                record['last_error'] = last_error
            return record

        base = '/result/bisect/results/kernel/2025-11-06'
        return [
            sample(1, 'success', 'a1b2c3d4e5f6', 'boot.error.boot_failures',
                   f'{base}/25110517160800001/a1b2c3d4e5f6/001', 4200, 3600, 1800,
                   head_check('fixed', 1200,
                              'ee55f6a88c1f70120d9cec8244c5cd10f0c4dce9',
                              '25102311205416400', [])),
            sample(2, 'success', 'b2c3d4e5f6g7', 'stderr.BUG:kernel_NULL_pointer',
                   f'{base}/25110517160800002/b2c3d4e5f6g7/002', 7800, 7200, 5400,
                   head_check('regress', 4800,
                              'ff66f7b99d2f81231e9df9355d6e11e1f5d5e7f0',
                              '25102311205416500', ['error_001', 'error_002'])),
            sample(3, 'failed', '', 'dmesg.WARNING:at#for_ip_swiotlb_free',
                   f'{base}/25110517160800003/null/003', 11400, 10800, 9000,
                   head_check('fixed', 8400,
                              'gg77g8caae3f92342f0ea0466e7f22f2g6f6f8g1',
                              '25102311205416600', []),
                   last_error='Task failed: Bisect failed: Failed to find valid good commit'),
            sample(4, 'success', 'd4e5f6g7h8i9', 'kmsg.kvm#vcpu#unhandled_rdmsr',
                   f'{base}/25110517160800004/d4e5f6g7h8i9/004', 15000, 14400, 12600,
                   head_check('unknown', 12000,
                              'hh88h9dbbf4a03453g1fb1577f8g33g3h7g7h9i2',
                              '25102311205416700', ['error_003'])),
            sample(5, 'wait', '', 'dmesg.BUG:unable_to_handle_page_fault',
                   '', 300, 0, 0, {}),
            sample(6, 'processing', '', 'kmsg.RIP:_oom_kill_process',
                   f'{base}/25110517160800006/processing/006', 1800, 1500, 0, {}),
            sample(7, 'failed', '', 'stderr.ERROR:failed_to_build_kernel',
                   f'{base}/25110517160800007/null/007', 5400, 5000, 3600, {},
                   last_error='Build failed: kernel compilation error')
        ]
    
    def analyze_tasks(self, tasks):
        """Aggregate quality metrics over a batch of bisect tasks.

        :param tasks: list of task dicts (e.g. from query_bisect_tasks() or
                      get_sample_tasks()); each needs 'id', 'bad_job_id',
                      'bisect_status' and 'first_bad_commit'.
        :return: dict of aggregate statistics, or None when *tasks* is empty.
        """
        if not tasks:
            return None

        analysis = {
            'total': len(tasks),
            'success_count': 0,
            'failure_count': 0,
            'pending_count': 0,
            'duplicate_tasks': defaultdict(list),
            'duration_stats': [],
            'first_bad_commits': defaultdict(int),
            'status_distribution': defaultdict(int),
            'timeliness': {
                'timeout': 0,    # >8 hours
                'slow': 0,       # 4-8 hours
                'normal': 0,     # 10 minutes-4 hours
                'fast': 0        # <10 minutes
            },
            # HEAD test check statistics
            'head_check_stats': defaultdict(int),
            'head_check_details': []
        }

        # Track min/max duration in the same pass as everything else
        # (the old code made a second full pass over the tasks).
        min_duration_task = None
        max_duration_task = None

        for task in tasks:
            status = task['bisect_status']
            analysis['status_distribution'][status] += 1

            # Success/failure count.
            # BUGFIX: completed-failure tasks are stored with
            # bisect_status='failed' (see calculate_success_rate_enhanced);
            # the old check for 'failure' misclassified every failed task as
            # pending and inflated success_rate.  Accept the legacy
            # 'failure' spelling too, for backward compatibility.
            if status == 'success':
                analysis['success_count'] += 1
            elif status in ('failed', 'failure'):
                analysis['failure_count'] += 1
            else:
                analysis['pending_count'] += 1

            # Group task ids by bad_job_id to detect duplicate bisect runs
            analysis['duplicate_tasks'][task['bad_job_id']].append(task['id'])

            # First bad commit frequency
            if task['first_bad_commit']:
                analysis['first_bad_commits'][task['first_bad_commit']] += 1

            # Timeliness analysis - calculated directly from raw timestamps
            if task.get('_start_timestamp') and task.get('_end_timestamp'):
                duration = task['_end_timestamp'] - task['_start_timestamp']
                analysis['duration_stats'].append(duration)

                if min_duration_task is None or duration < min_duration_task['duration']:
                    min_duration_task = {'task': task, 'duration': duration}
                if max_duration_task is None or duration > max_duration_task['duration']:
                    max_duration_task = {'task': task, 'duration': duration}

                # Classify timeliness
                if duration > 28800:    # 8 hours
                    analysis['timeliness']['timeout'] += 1
                elif duration > 14400:  # 4 hours
                    analysis['timeliness']['slow'] += 1
                elif duration > 600:    # 10 minutes
                    analysis['timeliness']['normal'] += 1
                else:
                    analysis['timeliness']['fast'] += 1

            # HEAD test check statistics ('j' may arrive JSON-encoded)
            j_data = task.get('j', {})
            if isinstance(j_data, str):
                try:
                    j_data = json.loads(j_data)
                except (ValueError, TypeError):
                    j_data = {}

            if not isinstance(j_data, dict):
                j_data = {}

            head_check_status = j_data.get('head_check_status')
            if head_check_status:
                analysis['head_check_stats'][head_check_status] += 1

                # Record details for display
                analysis['head_check_details'].append({
                    'task_id': task['id'],
                    'bad_job_id': task['bad_job_id'],
                    'bisect_status': task['bisect_status'],
                    'head_check_status': head_check_status,
                    'head_check_at': j_data.get('head_check_at'),
                    'head_check_commit': j_data.get('head_check_commit'),
                    'head_check_job_id': j_data.get('head_check_job_id'),
                    'regressed_errids': j_data.get('regressed_errids', [])
                })

        # Duplicate rate: share of bad_job_ids with more than one bisect task
        duplicate_count = sum(1 for jobs in analysis['duplicate_tasks'].values() if len(jobs) > 1)
        analysis['duplicate_rate'] = duplicate_count / analysis['total'] if analysis['total'] > 0 else 0

        # Success rate over completed tasks only (pending excluded)
        completed_tasks = analysis['success_count'] + analysis['failure_count']
        analysis['success_rate'] = analysis['success_count'] / completed_tasks if completed_tasks > 0 else 0

        # Average/min/max duration; these keys are only present when at least
        # one task had both timestamps (matching the previous behavior)
        if analysis['duration_stats']:
            analysis['avg_duration'] = sum(analysis['duration_stats']) / len(analysis['duration_stats'])
            analysis['min_duration'] = min_duration_task['duration'] if min_duration_task else 0
            analysis['max_duration'] = max_duration_task['duration'] if max_duration_task else 0
            analysis['min_duration_task'] = min_duration_task
            analysis['max_duration_task'] = max_duration_task

        # Convert to regular dicts for cleaner serialization
        analysis['status_distribution'] = dict(analysis['status_distribution'])
        analysis['first_bad_commits'] = dict(analysis['first_bad_commits'])

        return analysis
    
    def calculate_miss_rate_enhanced(self, bisect_tasks, time_window_hours=24):
        """
        Enhanced miss rate calculation (fixed version)

        Miss rate definition:
        1. Ratio of failed jobs without errid (errid detection missed)
        2. Ratio of jobs with errid but bisect failed (bisect location failed)

        Note: Only count completed bisect tasks (success/failed), excluding wait/processing status

        Performance optimization:
        - Use same time window as bisect tasks to avoid querying too much data

        :param bisect_tasks: list of bisect task dicts ('bad_job_id', 'bisect_status')
        :param time_window_hours: look-back window (hours) for the jobs-index queries
        :return: (total_miss_rate, details_dict); (0, {}) for empty input,
                 (-1, {}) when a query raises
        """
        if not bisect_tasks:
            return 0, {}

        try:
            # Use a reasonable time threshold (default 24 hours, not 80000 hours)
            cutoff_time = datetime.now() - timedelta(hours=time_window_hours)
            unix_cutoff = int(cutoff_time.timestamp())

            logger.info(f"Calculating miss rate: time window = {time_window_hours} hours")

            # ===== Part 1: Calculate percentage of failed jobs without errid =====

            # 1.1 Query all failed completed tasks (limit to reasonable number)
            query_failed_jobs = {
                "bool": {
                    "must": [
                        {"in": {"j.job_health": ["fail", "abort"]}},
                        {"equals": {"j.job_stage": "finish"}},
                        {"equals": {"j.job_data_readiness": "complete"}},
                        {"range": {"submit_time": {"gte": unix_cutoff}}}
                    ]
                }
            }

            logger.debug(f"Query all failed completed tasks: {json.dumps(query_failed_jobs, indent=2)}")
            failed_jobs = self.client.search(
                index=self.jobs_db,
                query=query_failed_jobs,
                limit=10000,
                options={'max_matches': 10000}
            )
            total_failed_jobs = len(failed_jobs) if failed_jobs else 0

            # 1.2 Query failed tasks without errid
            query_failed_no_errid = {
                "bool": {
                    "must": [
                        {"in": {"j.job_health": ["fail", "abort"]}},
                        {"equals": {"j.job_stage": "finish"}},
                        {"equals": {"j.job_data_readiness": "complete"}},
                        {"range": {"submit_time": {"gte": unix_cutoff}}}
                    ],
                    # "missing errid" = the j.errid field is absent OR empty
                    "should": [
                        {"bool": {"must_not": [{"exists": {"field": "j.errid"}}]}},
                        {"equals": {"j.errid": ""}}
                    ],
                    "minimum_should_match": 1
                }
            }

            logger.debug(f"查询失败但无errid的任务: {json.dumps(query_failed_no_errid, indent=2)}")
            failed_no_errid_jobs = self.client.search(
                index=self.jobs_db,
                query=query_failed_no_errid,
                limit=10000,
                options={'max_matches': 10000}
            )
            failed_no_errid_count = len(failed_no_errid_jobs) if failed_no_errid_jobs else 0

            # Part 1 ratio: failed jobs that carry no errid
            no_errid_rate = failed_no_errid_count / total_failed_jobs if total_failed_jobs > 0 else 0

            # ===== Part 2: percentage of jobs with errid whose bisect failed =====

            # 2.1 Extract the successful and failed bisect job ids up front
            # (the debug logic below needs them)
            successful_bisect_job_ids = {
                task['bad_job_id'] for task in bisect_tasks
                if task.get('bisect_status') == 'success'
            }

            failed_bisect_job_ids = {
                task['bad_job_id'] for task in bisect_tasks
                if task.get('bisect_status') == 'failed'
            }

            # 2.2 Query all completed jobs that have an errid.
            # NOTE: errid is a stored field and cannot be searched directly;
            # use j.errid (an attribute) for the search instead.
            query_has_errid = {
                "bool": {
                    "must": [
                        {"equals": {"j.job_stage": "finish"}},
                        {"equals": {"j.job_data_readiness": "complete"}},
                        {"range": {"submit_time": {"gte": unix_cutoff}}},
                        # filter via j.errid (attribute, searchable)
                        {"exists": {"field": "j.errid"}}
                    ],
                    "must_not": [
                        {"equals": {"j.errid": ""}}
                    ]
                }
            }

            logger.debug(f"查询所有有errid的已完成任务: {json.dumps(query_has_errid, indent=2)}")
            has_errid_jobs = self.client.search(
                index=self.jobs_db,
                query=query_has_errid,
                limit=10000,
                options={'max_matches': 10000}
            )
            total_has_errid = len(has_errid_jobs) if has_errid_jobs else 0

            # DEBUG: if the query returned 0 rows, probe one bisect task's job
            # directly to diagnose where the errid actually lives
            if total_has_errid == 0 and successful_bisect_job_ids:
                logger.warning(f"查询 'has errid' 返回 0，但有 {len(successful_bisect_job_ids)} 个成功的 bisect 任务")
                logger.warning("尝试直接查询一个 bisect 任务对应的 job 来诊断问题...")

                # take the first successful bisect task's bad_job_id as a sample
                sample_job_id = list(successful_bisect_job_ids)[0]
                logger.info(f"Sample job_id: {sample_job_id}")

                try:
                    sample_job_id_int = int(sample_job_id)
                    sample_query = {"bool": {"must": [{"equals": {"id": sample_job_id_int}}]}}
                    sample_jobs = self.client.search(
                        index=self.jobs_db,
                        query=sample_query,
                        limit=1
                    )

                    if sample_jobs and len(sample_jobs) > 0:
                        sample_job = sample_jobs[0]
                        logger.info(f"Sample job 的顶层字段: {list(sample_job.keys())}")

                        # check where errid actually resides in the document
                        if 'errid' in sample_job:
                            logger.info(f"✓ errid 在顶层: {sample_job['errid']}")
                        if 'stats' in sample_job:
                            stats = sample_job['stats']
                            if isinstance(stats, dict):
                                if 'errid' in stats:
                                    logger.info(f"✓ errid 在 stats 中: {stats['errid']}")
                                # look for keys matching errid-like patterns
                                errid_keys = [k for k in stats.keys() if 'eid' in k.lower() or 'errid' in k.lower() or '.c:' in k or '.h:' in k]
                                if errid_keys:
                                    logger.info(f"✓ 在 stats 中找到 errid 模式的 keys: {errid_keys[:5]}")
                        if 'j' in sample_job and isinstance(sample_job['j'], dict):
                            if 'errid' in sample_job['j']:
                                logger.info(f"✓ errid 在 j 中: {sample_job['j']['errid']}")
                    else:
                        logger.warning(f"无法找到 job_id={sample_job_id} 的 job")
                except Exception as e:
                    logger.error(f"诊断查询失败: {e}")

            # 2.3 Count jobs that have an errid but whose bisect failed.
            # Only jobs with a completed bisect attempt (success or failed) count.
            bisect_failed_count = 0
            bisect_not_started_count = 0  # jobs with no bisect attempt yet

            if has_errid_jobs:
                for job in has_errid_jobs:
                    job_id = str(job.get('id'))
                    if job_id in successful_bisect_job_ids:
                        # bisect succeeded -> not a miss
                        continue
                    elif job_id in failed_bisect_job_ids:
                        # bisect was attempted but failed -> counts as a miss
                        bisect_failed_count += 1
                    else:
                        # no bisect task yet -> not a miss (may still be queued)
                        bisect_not_started_count += 1

            # Part 2 ratio: failure rate over jobs that actually had a
            # bisect attempt
            total_bisect_attempted = len(successful_bisect_job_ids) + len(failed_bisect_job_ids)
            bisect_failed_rate = bisect_failed_count / total_bisect_attempted if total_bisect_attempted > 0 else 0

            # ===== Overall miss rate =====
            # NOTE: the two ratios have different denominators, so adding them
            # would be misleading; report both separately in *details* and use
            # only part 1 as the headline number for now.
            total_miss_rate = no_errid_rate  # for now only part 1 is used

            details = {
                # Part 1: failed jobs without errid
                'total_failed_jobs': total_failed_jobs,
                'failed_no_errid_count': failed_no_errid_count,
                'no_errid_rate': no_errid_rate,

                # Part 2: jobs with errid but failed bisect
                'total_has_errid': total_has_errid,
                'bisect_attempted': total_bisect_attempted,
                'bisect_success_count': len(successful_bisect_job_ids),
                'bisect_failed_count': bisect_failed_count,
                'bisect_not_started_count': bisect_not_started_count,
                'bisect_failed_rate': bisect_failed_rate,

                # headline number
                'total_miss_rate': total_miss_rate,

                # legacy-format compatibility keys
                'failed_jobs_count': total_failed_jobs,
                'no_errid_count': failed_no_errid_count,
                'has_errid_count': total_has_errid,
                'unsuccessful_bisect_count': bisect_failed_count,
                'total_missed_jobs': failed_no_errid_count + bisect_failed_count,
                'miss_rate_no_errid': no_errid_rate,
                'miss_rate_unsuccessful_bisect': bisect_failed_rate,

                # record the time window used
                'time_window_hours': time_window_hours
            }

            logger.info(f"漏检率计算: no_errid_rate={no_errid_rate:.2%}, bisect_failed_rate={bisect_failed_rate:.2%}")
            logger.info(f"Bisect统计: attempted={total_bisect_attempted}, success={len(successful_bisect_job_ids)}, failed={bisect_failed_count}, not_started={bisect_not_started_count}")
            logger.debug(f"漏检率计算结果: {json.dumps(details, indent=2)}")

            return total_miss_rate, details

        except Exception as err:
            logger.error(f"漏检率计算失败: {err}")
            import traceback
            traceback.print_exc()
            return -1, {}

    def calculate_duplicate_rate_by_category(self, bisect_tasks):
        """
        Calculate duplicate rates per test category (optimized version).

        Performance optimizations:
        - Query only the job_ids referenced by the bisect tasks, not all jobs
        - Batch the lookups into a single query to reduce network round-trips

        :param bisect_tasks: list of bisect task dicts ('bad_job_id',
                             'first_bad_commit')
        :return: {category: {'total_tasks', 'total_duplicate_tasks',
                 'unique_commits', 'duplicate_rate'}}; {} on empty input or
                 query failure
        """
        if not bisect_tasks:
            return {}

        try:
            # Collect every job_id we need to look up (kept in string form)
            job_ids = set()
            for task in bisect_tasks:
                if task.get('bad_job_id'):
                    # normalize job_id to a string
                    job_ids.add(str(task['bad_job_id']))

            if not job_ids:
                logger.warning("No job_ids found in bisect_tasks")
                return {}

            logger.info(f"查询 {len(job_ids)} 个job的suite信息")

            # Batch-query just these jobs (instead of scanning all jobs);
            # use multiple equals conditions in place of an `in` query
            should_conditions = []
            for job_id in job_ids:
                # the id match needs an integer value
                try:
                    job_id_int = int(job_id) if isinstance(job_id, str) else job_id
                    should_conditions.append({"equals": {"id": job_id_int}})
                except (ValueError, TypeError):
                    logger.warning(f"Invalid job_id format: {job_id}")
                    continue

            query = {
                "bool": {
                    "should": should_conditions,
                    "minimum_should_match": 1
                }
            }

            logger.debug(f"Executing query for job suites: limit={len(job_ids)}")
            jobs = self.client.search(
                index=self.jobs_db,
                query=query,
                limit=min(len(job_ids), 10000),  # cap the query size
                options={'max_matches': min(len(job_ids), 10000)}
            )

            if not jobs:
                logger.warning("No jobs found for the given job_ids")
                return {}

            # Build the job_id -> suite mapping
            job_suites = {}
            for job in jobs:
                # keep job_id as a string (same form as in bisect_tasks)
                job_id = str(job.get('id'))
                j_data = job.get('j', {})
                if isinstance(j_data, dict):
                    suite = j_data.get('suite')
                else:
                    suite = None
                job_suites[job_id] = suite

            # Map each job to a category based on its suite
            job_to_category = {}
            for task in bisect_tasks:
                # bad_job_id is also normalized to string for consistency
                job_id = str(task['bad_job_id']) if task['bad_job_id'] else None
                if job_id:
                    suite = job_suites.get(job_id)
                    if suite:
                        if suite == 'makepkg':
                            category = '构建'
                        elif suite in ['ltp', 'trinity', 'kernel-selftests']:
                            category = '功能测试'
                        elif suite in ['unixbench', 'fio', 'hackbench']:
                            category = '性能'
                        else:
                            category = '其他'
                        job_to_category[job_id] = category
                    else:
                        # If the suite cannot be found, assign it to a default category
                        job_to_category[job_id] = 'Uncategorized'

            category_stats = defaultdict(lambda: {
                'total': 0,
                'commit_counts': defaultdict(int),
                'duplicate_commits': 0
            })

            # Per category: count tasks and occurrences of each first_bad_commit
            for task in bisect_tasks:
                if task['first_bad_commit'] and task.get('bad_job_id'):
                    # normalize job_id to string form
                    job_id = str(task['bad_job_id'])
                    if job_id in job_to_category:
                        category = job_to_category[job_id]
                        category_stats[category]['total'] += 1
                        category_stats[category]['commit_counts'][task['first_bad_commit']] += 1

            duplicate_rates = {}
            for category, stats in category_stats.items():
                # Correctly calculate the total number of duplicate *tasks*, not just duplicate commits.
                # For each commit that appears more than once, sum the surplus occurrences (count - 1).
                total_duplicate_tasks = sum(count - 1 for count in stats['commit_counts'].values() if count > 1)

                duplicate_rates[category] = {
                    'total_tasks': stats['total'],
                    'total_duplicate_tasks': total_duplicate_tasks,
                    'unique_commits': len(stats['commit_counts']),
                    'duplicate_rate': total_duplicate_tasks / stats['total'] if stats['total'] > 0 else 0
                }

            return duplicate_rates

        except Exception as err:
            logger.error(f"重复率计算失败: {err}")
            import traceback
            traceback.print_exc()
            return {}
    
    def calculate_success_rate_enhanced(self, bisect_tasks):
        """
        Enhanced success-rate calculation (simplified version).

        With the integrated two-step verification mechanism,
        bisect_status='success' alone marks a task as successful:

            success rate = success count / completed count
            completed    = success + failed
                           (wait / processing / pending_verification excluded)

        :param bisect_tasks: list of bisect task dicts with 'bisect_status'
        :return: (success_rate, details_dict); (0, {}) for empty input
        """
        if not bisect_tasks:
            return 0, {}

        # Tally every status in one pass
        counts = defaultdict(int)
        for task in bisect_tasks:
            counts[task['bisect_status']] += 1

        succeeded = counts.get('success', 0)
        failed = counts.get('failed', 0)  # the stored status is 'failed', not 'failure'
        finished = succeeded + failed

        rate = succeeded / finished if finished > 0 else 0

        return rate, {
            'total_tasks': len(bisect_tasks),
            'completed_tasks': finished,
            'success_count': succeeded,
            'failure_count': failed,
            'wait_count': counts.get('wait', 0),
            'processing_count': counts.get('processing', 0),
            'pending_verification_count': counts.get('pending_verification', 0),
        }

    def calculate_metrics_by_category(self, bisect_tasks, time_window_hours=24):
        """
        Compute all quality metrics split by bisect category (build vs functional).

        Target metrics:
        - Build bisect: success rate >= 95%, miss rate <= 10%, duplicate rate <= 2%, timeliness <= 1 day
        - Functional bisect: success rate >= 80%, miss rate <= 20%, duplicate rate <= 3%, timeliness <= 5 days

        :param bisect_tasks: list of bisect task dicts
        :param time_window_hours: analysis time window in hours
        :return: dict keyed by category ('build' / 'functional') with metric dicts;
                 empty dict when nothing can be classified or the query fails
        """
        if not bisect_tasks:
            return {}

        try:
            # Collect the bad_job_ids needed to classify each task by suite.
            job_ids = set()
            for task in bisect_tasks:
                if task.get('bad_job_id'):
                    job_ids.add(str(task['bad_job_id']))

            if not job_ids:
                logger.warning("No job_ids found in bisect_tasks")
                return {}

            logger.info(f"Querying {len(job_ids)} jobs for category classification")

            # Batch-query job records; skip ids that are not valid integers.
            should_conditions = []
            for job_id in job_ids:
                try:
                    should_conditions.append({"equals": {"id": int(job_id)}})
                except (ValueError, TypeError):
                    logger.warning(f"Invalid job_id format: {job_id}")

            # All ids were malformed -> nothing to query; avoid sending a
            # degenerate bool query with an empty 'should' list.
            if not should_conditions:
                logger.warning("No valid job_ids to query")
                return {}

            query = {
                "bool": {
                    "should": should_conditions,
                    "minimum_should_match": 1
                }
            }

            jobs = self.client.search(
                index=self.jobs_db,
                query=query,
                limit=min(len(job_ids), 10000),
                options={'max_matches': min(len(job_ids), 10000)}
            )

            if not jobs:
                logger.warning("No jobs found for category classification")
                return {}

            # Build job_id -> suite mapping from each job's 'j' payload.
            job_suites = {}
            for job in jobs:
                j_data = job.get('j', {})
                suite = j_data.get('suite') if isinstance(j_data, dict) else None
                job_suites[str(job.get('id'))] = suite

            # Split tasks: build (makepkg) vs functional (known functional suites).
            build_tasks = []
            functional_tasks = []

            for task in bisect_tasks:
                # Use .get(): not every task carries 'bad_job_id' (see collection above).
                bad_job_id = task.get('bad_job_id')
                if not bad_job_id:
                    continue
                suite = job_suites.get(str(bad_job_id))
                if suite == 'makepkg':
                    build_tasks.append(task)
                elif suite in ['ltp', 'trinity', 'kernel-selftests', 'xfstests', 'cpu-hotplug']:
                    functional_tasks.append(task)
                # Other suites (e.g. performance tests) are intentionally skipped.

            logger.info(f"Categorized tasks: Build={len(build_tasks)}, Functional={len(functional_tasks)}")

            # Compute the full metric set for each non-empty category.
            results = {}

            if build_tasks:
                results['build'] = self._calculate_category_metrics(
                    build_tasks,
                    'Build',
                    time_window_hours,
                    targets={'success_rate': 0.95, 'miss_rate': 0.10, 'duplicate_rate': 0.02, 'timeliness_days': 1}
                )

            if functional_tasks:
                results['functional'] = self._calculate_category_metrics(
                    functional_tasks,
                    'Functional',
                    time_window_hours,
                    targets={'success_rate': 0.80, 'miss_rate': 0.20, 'duplicate_rate': 0.03, 'timeliness_days': 5}
                )

            return results

        except Exception as err:
            # Metrics are best-effort: log the failure and return an empty result
            # instead of aborting the whole report.
            logger.error(f"Category metrics calculation failed: {err}")
            import traceback
            traceback.print_exc()
            return {}

    def _calculate_category_metrics(self, tasks, category_name, time_window_hours, targets):
        """
        计算单个分类的所有指标

        :param tasks: 该分类的任务列表
        :param category_name: 分类名称 ('Build' or 'Functional')
        :param time_window_hours: 时间窗口
        :param targets: 目标指标字典 {'success_rate', 'miss_rate', 'duplicate_rate', 'timeliness_days'}
        :return: 指标结果字典
        """
        metrics = {
            'category': category_name,
            'total_tasks': len(tasks),
            'targets': targets
        }

        # 1. 成功率
        success_rate, success_details = self.calculate_success_rate_enhanced(tasks)
        metrics['success_rate'] = success_rate
        metrics['success_rate_percent'] = success_rate * 100
        metrics['success_details'] = success_details
        metrics['success_rate_meets_target'] = success_rate >= targets['success_rate']

        # 2. 漏检率
        miss_rate, miss_details = self.calculate_miss_rate_enhanced(tasks, time_window_hours)
        metrics['miss_rate'] = miss_rate
        metrics['miss_rate_percent'] = miss_rate * 100
        metrics['miss_details'] = miss_details
        metrics['miss_rate_meets_target'] = miss_rate <= targets['miss_rate']

        # 3. 重复率 (使用现有的 calculate_duplicate_rate_by_category 逻辑)
        # 这里需要单独计算该分类的重复率
        duplicate_rate = self._calculate_single_category_duplicate_rate(tasks)
        metrics['duplicate_rate'] = duplicate_rate
        metrics['duplicate_rate_percent'] = duplicate_rate * 100
        metrics['duplicate_rate_meets_target'] = duplicate_rate <= targets['duplicate_rate']

        # 4. 时效 (90%分位数，单位：天)
        timeliness_90_seconds, timeliness_details = self.calculate_timeliness_enhanced(tasks)
        timeliness_90_days = timeliness_90_seconds / 86400 if timeliness_90_seconds > 0 else 0
        metrics['timeliness_90_days'] = timeliness_90_days
        metrics['timeliness_90_seconds'] = timeliness_90_seconds
        metrics['timeliness_details'] = timeliness_details
        metrics['timeliness_meets_target'] = timeliness_90_days <= targets['timeliness_days']

        # 5. 总体评估
        all_targets_met = (
            metrics['success_rate_meets_target'] and
            metrics['miss_rate_meets_target'] and
            metrics['duplicate_rate_meets_target'] and
            metrics['timeliness_meets_target']
        )
        metrics['all_targets_met'] = all_targets_met

        return metrics

    def _calculate_single_category_duplicate_rate(self, tasks):
        """
        计算单个分类的重复率

        重复率 = 重复任务数 / 总任务数
        重复任务：同一个 first_bad_commit 出现多次

        :param tasks: 任务列表
        :return: 重复率 (0.0-1.0)
        """
        if not tasks:
            return 0.0

        commit_counts = defaultdict(int)
        for task in tasks:
            if task.get('first_bad_commit'):
                commit_counts[task['first_bad_commit']] += 1

        # 计算重复任务数：对于每个出现多次的 commit，重复数 = count - 1
        total_duplicate_tasks = sum(count - 1 for count in commit_counts.values() if count > 1)

        duplicate_rate = total_duplicate_tasks / len(tasks) if len(tasks) > 0 else 0.0
        return duplicate_rate
    
    def analyze_failed_tasks(self, tasks):
        """
        Analyze failed bisect tasks, clustering them by last_error.

        :param tasks: list of bisect task dicts
        :return: dict with clustering statistics, or None when there is
                 nothing to analyze
        """
        if not tasks:
            return None

        # Only tasks that finished with bisect_status='failed' are analyzed.
        failed_tasks = [task for task in tasks if task.get('bisect_status') == 'failed']

        if not failed_tasks:
            return None

        # Cluster by normalized last_error; tasks without one go to a
        # separate bucket so they still show up in the report.
        error_clusters = defaultdict(list)
        no_error_tasks = []

        for task in failed_tasks:
            # last_error may live inside the 'j' payload, which can arrive as
            # a JSON string. json is imported at module level.
            j_data = task.get('j', {})
            if isinstance(j_data, str):
                try:
                    j_data = json.loads(j_data)
                except (ValueError, TypeError):
                    # Malformed JSON payload: treat as empty rather than
                    # aborting the whole report (narrowed from a bare except).
                    j_data = {}

            if not isinstance(j_data, dict):
                j_data = {}

            last_error = j_data.get('last_error') or task.get('last_error')

            # Common summary fields reported for every task.
            summary = {
                'task_id': task.get('id'),
                'bad_job_id': task.get('bad_job_id'),
                'start_time': task.get('start_time'),
                'end_time': task.get('end_time'),
                'first_bad_commit': task.get('first_bad_commit')
            }

            if last_error:
                # Normalize the message so equivalent failures cluster together.
                error_key = self._extract_error_key(last_error)
                error_clusters[error_key].append({**summary, 'full_error': last_error})
            else:
                no_error_tasks.append(summary)

        return {
            'total_failed': len(failed_tasks),
            'error_clusters': dict(error_clusters),
            'no_error_tasks': no_error_tasks,
            'unique_errors': len(error_clusters),
            # 'cluster' (not 'tasks') avoids shadowing the parameter.
            'tasks_with_error': sum(len(cluster) for cluster in error_clusters.values()),
            'tasks_without_error': len(no_error_tasks)
        }

    def _extract_error_key(self, error_msg):
        """
        从错误消息中提取关键部分用于聚类
        """
        if not error_msg:
            return "unknown_error"

        # 移除时间戳、文件路径中的具体数字等变化部分
        import re

        # 移除时间戳 (例如: 2024-01-01 12:00:00)
        error_key = re.sub(r'\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}', '', error_msg)

        # 移除job_id (例如: job_id=123456)
        error_key = re.sub(r'job_id=\d+', 'job_id=XXX', error_key)

        # 移除commit hash (40位十六进制)
        error_key = re.sub(r'\b[a-f0-9]{40}\b', 'COMMIT_HASH', error_key)
        error_key = re.sub(r'\b[a-f0-9]{7,12}\b', 'SHORT_HASH', error_key)

        # 移除具体的数字（但保留错误代码）
        error_key = re.sub(r'(?<!error\s)\b\d+\b', 'NUM', error_key)

        # 移除多余的空格
        error_key = ' '.join(error_key.split())

        # 如果错误消息太长，只取前200个字符
        if len(error_key) > 200:
            error_key = error_key[:200] + "..."

        return error_key or "unknown_error"

    def format_failed_task_analysis(self, analysis):
        """Render the failed-task clustering analysis as a readable text report."""
        if not analysis:
            return "No failed task analysis results"

        divider = "=" * 80
        lines = [
            divider,
            "Failed Bisect Tasks Analysis Report",
            divider,
            f"Total Failed Tasks: {analysis['total_failed']}",
            f"Unique Error Types: {analysis['unique_errors']}",
            f"Tasks with Error Message: {analysis['tasks_with_error']}",
            f"Tasks without Error Message: {analysis['tasks_without_error']}",
            "",
            "Error Clustering Results:",
            "-" * 40,
        ]

        # Most frequent error patterns first.
        ranked = sorted(
            analysis['error_clusters'].items(),
            key=lambda item: len(item[1]),
            reverse=True
        )

        for idx, (error_key, cluster) in enumerate(ranked, 1):
            lines.append(f"\n[Error Type {idx}] ({len(cluster)} occurrences)")
            lines.append(f"Error Pattern: {error_key}")
            lines.append("Affected Tasks:")

            # Only the first 5 tasks per cluster are detailed.
            for pos, entry in enumerate(cluster[:5]):
                lines.append(f"  {pos+1}. Task ID: {entry['task_id']}")
                lines.append(f"     Bad Job ID: {entry['bad_job_id']}")
                if entry.get('first_bad_commit'):
                    lines.append(f"     First Bad Commit: {entry['first_bad_commit'][:12]}")
                lines.append(f"     Time: {entry['start_time']} - {entry['end_time']}")

                # Show the full message once per cluster, wrapped and capped.
                if pos == 0 and entry.get('full_error'):
                    lines.append("     Full Error Message:")
                    import textwrap
                    wrapped = textwrap.wrap(entry['full_error'], width=70)
                    for text_line in wrapped[:5]:  # show at most 5 lines
                        lines.append(f"       {text_line}")
                    if len(wrapped) > 5:
                        lines.append(f"       ... ({len(wrapped)-5} more lines)")

            if len(cluster) > 5:
                lines.append(f"  ... and {len(cluster)-5} more tasks")

        # Tasks that failed without any recorded error message (first 10).
        if analysis['no_error_tasks']:
            lines.append(f"\n[Tasks Without Error Message] ({len(analysis['no_error_tasks'])} tasks)")
            for pos, entry in enumerate(analysis['no_error_tasks'][:10], 1):
                lines.append(f"  {pos}. Task ID: {entry['task_id']}, Bad Job ID: {entry['bad_job_id']}")
            if len(analysis['no_error_tasks']) > 10:
                lines.append(f"  ... and {len(analysis['no_error_tasks'])-10} more tasks")

        lines.append("")
        lines.append(divider)

        return "\n".join(lines)

    def calculate_timeliness_enhanced(self, bisect_tasks, percentile=90):
        """
        Enhanced timeliness calculation.

        Timeliness = the duration within which `percentile` percent of the
        completed tasks finished localization.

        :param bisect_tasks: list of bisect task dicts (expects
                             '_start_timestamp', '_end_timestamp', 'bisect_status')
        :param percentile: percentile to report (default 90)
        :return: (percentile_duration_seconds, details dict)
        """
        if not bisect_tasks:
            return 0, {}

        durations = []
        min_duration_task = None
        max_duration_task = None

        for task in bisect_tasks:
            # BUGFIX: finished tasks are marked 'failed' elsewhere in this
            # tool (see analyze_failed_tasks); the old check only accepted
            # 'failure' and silently excluded every failed run. Accept both
            # spellings for backward compatibility.
            if (task.get('_start_timestamp') and task.get('_end_timestamp') and
                    task.get('bisect_status') in ('success', 'failed', 'failure')):
                duration = task['_end_timestamp'] - task['_start_timestamp']
                durations.append(duration)
                # Track min/max tasks in the same pass instead of re-scanning.
                if min_duration_task is None or duration < min_duration_task['duration']:
                    min_duration_task = {'task': task, 'duration': duration}
                if max_duration_task is None or duration > max_duration_task['duration']:
                    max_duration_task = {'task': task, 'duration': duration}

        if not durations:
            return 0, {'message': '无已完成任务'}

        # Nearest-rank style percentile: index = n * p / 100, clamped to the
        # last element (kept as-is to preserve the existing metric).
        durations.sort()
        percentile_index = int(len(durations) * percentile / 100)
        percentile_time = durations[percentile_index] if percentile_index < len(durations) else durations[-1]

        # Bucket durations into coarse latency categories.
        time_categories = {
            'fast': 0,      # < 10 minutes
            'normal': 0,    # 10 minutes - 4 hours
            'slow': 0,      # 4 hours - 8 hours
            'timeout': 0    # > 8 hours
        }

        for duration in durations:
            if duration < 600:        # 10 minutes
                time_categories['fast'] += 1
            elif duration < 14400:    # 4 hours
                time_categories['normal'] += 1
            elif duration < 28800:    # 8 hours
                time_categories['slow'] += 1
            else:
                time_categories['timeout'] += 1

        details = {
            'total_completed': len(durations),
            'percentile_time_seconds': percentile_time,
            'percentile_time_minutes': percentile_time / 60,
            'percentile_time_hours': percentile_time / 3600,
            'avg_duration_seconds': sum(durations) / len(durations),
            # durations is sorted, so min/max are the ends of the list.
            'min_duration_seconds': durations[0],
            'max_duration_seconds': durations[-1],
            'time_categories': time_categories,
            'min_duration_task': min_duration_task,
            'max_duration_task': max_duration_task
        }

        return percentile_time, details
    
    def format_analysis(self, analysis, hours=24):
        """
        Format the overall analysis results as a multi-line text report.

        :param analysis: aggregated analysis dict; requires 'total',
                         'status_distribution' and 'first_bad_commits',
                         everything else is optional
        :param hours: time range in hours, shown in the report header
        :return: formatted report string
        """
        if not analysis:
            return "No analysis results"

        # Enhanced metrics calculation (all optional -> default to 0)
        success_rate_percent = analysis.get('success_rate_enhanced', 0) * 100
        duplicate_rate_percent = analysis.get('duplicate_rate', 0) * 100
        miss_rate_percent = analysis.get('miss_rate', 0) * 100
        timeliness_90_minutes = analysis.get('timeliness_90', 0) / 60

        # Legacy timeliness distribution. BUGFIX: use .get() throughout —
        # 'timeliness' may be absent or partial, and direct indexing used to
        # raise KeyError right here.
        timeliness = analysis.get('timeliness', {})
        total_completed = (timeliness.get('timeout', 0) +
                           timeliness.get('normal', 0) +
                           timeliness.get('fast', 0))

        output = [
            "=" * 80,
            "Bisect Task Analysis Report",
            "=" * 80,
            f"Total Tasks: {analysis['total']}",
            f"Time Range: Last {hours} hours",
            "",
        ]

        # Display category metrics first if available
        category_metrics = analysis.get('category_metrics', {})
        if category_metrics:
            output.append("Enhanced Metrics by Category:")
            output.append("")

            # Build metrics
            if 'build' in category_metrics:
                build = category_metrics['build']
                output.append(f"  Build Bisect ({build['total_tasks']} tasks):")
                output.append(f"    - Success Rate: {build['success_rate_percent']:.2f}%")
                output.append(f"    - Miss Rate: {build['miss_rate_percent']:.2f}%")
                output.append(f"    - Duplicate Rate: {build['duplicate_rate_percent']:.2f}%")
                output.append(f"    - 90% Timeliness: {build['timeliness_90_days']:.2f} days ({build['timeliness_90_seconds']/60:.1f} minutes)")

            # Functional metrics
            if 'functional' in category_metrics:
                func = category_metrics['functional']
                output.append(f"  Functional Bisect ({func['total_tasks']} tasks):")
                output.append(f"    - Success Rate: {func['success_rate_percent']:.2f}%")
                output.append(f"    - Miss Rate: {func['miss_rate_percent']:.2f}%")
                output.append(f"    - Duplicate Rate: {func['duplicate_rate_percent']:.2f}%")
                output.append(f"    - 90% Timeliness: {func['timeliness_90_days']:.2f} days ({func['timeliness_90_seconds']/60:.1f} minutes)")

            output.append("")
            output.append("Overall Metrics (all categories combined):")
        else:
            output.append("Enhanced Metrics (with Verification):")

        # Overall metrics
        output.extend([
            f"  - Success Rate: {success_rate_percent:.2f}% (verified tasks / completed tasks)",
            f"  - Miss Rate: {miss_rate_percent:.2f}%",
            f"  - Duplicate Rate: {duplicate_rate_percent:.2f}%",
            f"  - 90% Timeliness: {timeliness_90_minutes:.1f} minutes",
            "",
        ])

        # Add success status details
        success_details = analysis.get('success_details', {})
        if success_details:
            success_count = success_details.get('success_count', 0)
            failure_count = success_details.get('failure_count', 0)
            completed_tasks = success_details.get('completed_tasks', 0)

            # BUGFIX: guard the percentages — completed_tasks can be 0 while
            # success_details is still non-empty.
            if completed_tasks > 0:
                success_pct = success_count / completed_tasks * 100
                failed_pct = failure_count / completed_tasks * 100
            else:
                success_pct = failed_pct = 0.0

            output.extend([
                "Success Status Breakdown:",
                f"  Total Completed: {completed_tasks}",
                f"  - Success (bisect_status='success'): {success_count} ({success_pct:.1f}%)",
                f"  - Failed (bisect_status='failed'): {failure_count} ({failed_pct:.1f}%)",
                f"  - Pending (wait/processing): {success_details.get('wait_count', 0) + success_details.get('processing_count', 0)}",
                "",
            ])

        output.append("Miss Rate Details:")

        # Add miss rate details
        miss_details = analysis.get('miss_details', {})
        if miss_details:
            output.extend([
                f"  Failed/Aborted Jobs Total: {miss_details.get('failed_jobs_count', 0)}",
                f"  Jobs without errid: {miss_details.get('no_errid_count', 0)} ({miss_details.get('miss_rate_no_errid', 0)*100:.1f}%)",
                "",
                f"  Jobs with errid: {miss_details.get('has_errid_count', 0)}",
                f"  Bisect attempted: {miss_details.get('bisect_attempted', 0)}",
                f"    ├─ Success: {miss_details.get('bisect_success_count', 0)}",
                f"    └─ Failed: {miss_details.get('unsuccessful_bisect_count', 0)} ({miss_details.get('miss_rate_unsuccessful_bisect', 0)*100:.1f}%)",
                f"  Bisect not started: {miss_details.get('bisect_not_started_count', 0)} (wait/processing)",
            ])

        # Add duplicate rate by category
        output.append("\nDuplicate Rate by Category:")
        duplicate_by_category = analysis.get('duplicate_by_category', {})
        if duplicate_by_category:
            for category, stats in duplicate_by_category.items():
                rate = stats.get('duplicate_rate', 0) * 100
                output.append(f"  {category}: {rate:.1f}% ({stats.get('total_duplicate_tasks', 0)} duplicate tasks out of {stats.get('total_tasks', 0)})")
        else:
            output.append("  No category data available")

        output.extend([
            "",
            "Status Distribution:"
        ])

        # Add status distribution; guard the denominator against a zero total.
        total_for_pct = analysis['total'] or 1
        for status, count in analysis['status_distribution'].items():
            output.append(f"  {status}: {count} ({count/total_for_pct*100:.1f}%)")

        # Enhanced timeliness analysis
        output.append("\nTimeliness Analysis (Enhanced):")
        timeliness_details = analysis.get('timeliness_details', {})
        if timeliness_details:
            time_categories = timeliness_details.get('time_categories', {})
            total_completed = timeliness_details.get('total_completed', 0)
            if total_completed > 0:
                output.extend([
                    f"  Total Completed: {total_completed}",
                    f"  Fast (<10m): {time_categories.get('fast', 0)} ({time_categories.get('fast', 0)/total_completed*100:.1f}%)",
                    f"  Normal (10m-4h): {time_categories.get('normal', 0)} ({time_categories.get('normal', 0)/total_completed*100:.1f}%)",
                    f"  Slow (4h-8h): {time_categories.get('slow', 0)} ({time_categories.get('slow', 0)/total_completed*100:.1f}%)",
                    f"  Timeout (>8h): {time_categories.get('timeout', 0)} ({time_categories.get('timeout', 0)/total_completed*100:.1f}%)",
                    f"  90% Percentile: {timeliness_details.get('percentile_time_minutes', 0):.1f} minutes",
                    f"  Average Duration: {timeliness_details.get('avg_duration_seconds', 0)/60:.1f} minutes",
                ])

                # NOTE(review): min/max duration tasks are read from the
                # analysis dict, not timeliness_details — presumably the
                # caller copies them up; confirm against the caller.
                min_duration_task = analysis.get('min_duration_task')
                max_duration_task = analysis.get('max_duration_task')

                if min_duration_task:
                    min_task_id = min_duration_task['task'].get('id', 'N/A')
                    output.append(f"  Min Duration: {timeliness_details.get('min_duration_seconds', 0)/60:.1f} minutes (id: {min_task_id})")

                if max_duration_task:
                    max_task_id = max_duration_task['task'].get('id', 'N/A')
                    output.append(f"  Max Duration: {timeliness_details.get('max_duration_seconds', 0)/60:.1f} minutes (id: {max_task_id})")
            else:
                output.append("  No completed tasks for timeliness analysis")
        else:
            # Fallback to legacy timeliness analysis (keys guarded with .get)
            if total_completed > 0:
                output.append(f"  Timeout (>1h): {timeliness.get('timeout', 0)} ({timeliness.get('timeout', 0)/total_completed*100:.1f}%)")
                output.append(f"  Normal (10m-1h): {timeliness.get('normal', 0)} ({timeliness.get('normal', 0)/total_completed*100:.1f}%)")
                output.append(f"  Fast (<10m): {timeliness.get('fast', 0)} ({timeliness.get('fast', 0)/total_completed*100:.1f}%)")
                output.append(f"  Average Duration: {analysis.get('avg_duration', 0)/60:.1f} minutes")

                # Display min and max duration with corresponding task info
                min_duration_task = analysis.get('min_duration_task')
                max_duration_task = analysis.get('max_duration_task')

                if min_duration_task:
                    min_task_id = min_duration_task['task'].get('id', 'N/A')
                    output.append(f"  Min Duration: {analysis.get('min_duration', 0)/60:.1f} minutes (id: {min_task_id})")

                if max_duration_task:
                    max_task_id = max_duration_task['task'].get('id', 'N/A')
                    output.append(f"  Max Duration: {analysis.get('max_duration', 0)/60:.1f} minutes (id: {max_task_id})")
            else:
                output.append("  No completed tasks")

        # HEAD test check results
        output.append("\nHEAD Test Check Results:")
        head_check_stats = analysis.get('head_check_stats', {})
        head_check_details = analysis.get('head_check_details', [])

        if head_check_stats:
            total_head_checks = sum(head_check_stats.values())
            output.append(f"  Total HEAD check tasks: {total_head_checks}")

            for status, count in head_check_stats.items():
                percentage = count / total_head_checks * 100 if total_head_checks > 0 else 0
                output.append(f"  {status}: {count} ({percentage:.1f}%)")

            # Display details (first 5 tasks)
            output.append("\n  HEAD Check Details (first 5 tasks):")
            for i, detail in enumerate(head_check_details[:5]):
                output.append(f"    {i+1}. Task ID: {detail['task_id']}")
                output.append(f"       Bad Job ID: {detail['bad_job_id']}")
                output.append(f"       Bisect Status: {detail['bisect_status']}")
                output.append(f"       HEAD Check Status: {detail['head_check_status']}")
                if detail.get('head_check_commit'):
                    output.append(f"       HEAD Check Commit: {detail['head_check_commit'][:12]}")
                if detail.get('head_check_job_id'):
                    output.append(f"       HEAD Check Job ID: {detail['head_check_job_id']}")
                if detail.get('regressed_errids'):
                    output.append(f"       Regressed Errids: {', '.join(detail['regressed_errids'])}")
                output.append("")
        else:
            output.append("  No HEAD test check data")

        output.append("\nFirst Bad Commit Distribution:")
        # Add commit distribution (top 10)
        sorted_commits = sorted(
            analysis['first_bad_commits'].items(),
            key=lambda x: x[1],
            reverse=True
        )[:10]

        for commit, count in sorted_commits:
            output.append(f"  {commit}: {count} times")

        # Detailed category metrics report (summary was printed above)
        category_metrics = analysis.get('category_metrics', {})
        if category_metrics:
            output.append("")
            output.append("=" * 80)
            output.append("CATEGORY METRICS (Build vs Functional Bisect)")
            output.append("=" * 80)

            for category_key in ['build', 'functional']:
                if category_key not in category_metrics:
                    continue

                metrics = category_metrics[category_key]
                category_name = metrics['category']
                targets = metrics['targets']

                output.append("")
                output.append(f"--- {category_name} Bisect Metrics ---")
                output.append(f"Total Tasks: {metrics['total_tasks']}")
                output.append("")

                # Target Requirements
                output.append("Target Requirements:")
                output.append(f"  - Success Rate: ≥{targets['success_rate']*100:.0f}%")
                output.append(f"  - Miss Rate: ≤{targets['miss_rate']*100:.0f}%")
                output.append(f"  - Duplicate Rate: ≤{targets['duplicate_rate']*100:.0f}%")
                output.append(f"  - Timeliness (90%): ≤{targets['timeliness_days']} day(s)")
                output.append("")

                # Actual Metrics
                output.append("Actual Metrics:")

                # Success Rate
                sr = metrics['success_rate_percent']
                sr_target = targets['success_rate'] * 100
                sr_status = "✓ PASS" if metrics['success_rate_meets_target'] else "✗ FAIL"
                output.append(f"  - Success Rate: {sr:.2f}% (target: ≥{sr_target:.0f}%) {sr_status}")

                # Miss Rate
                mr = metrics['miss_rate_percent']
                mr_target = targets['miss_rate'] * 100
                mr_status = "✓ PASS" if metrics['miss_rate_meets_target'] else "✗ FAIL"
                output.append(f"  - Miss Rate: {mr:.2f}% (target: ≤{mr_target:.0f}%) {mr_status}")

                # Duplicate Rate
                dr = metrics['duplicate_rate_percent']
                dr_target = targets['duplicate_rate'] * 100
                dr_status = "✓ PASS" if metrics['duplicate_rate_meets_target'] else "✗ FAIL"
                output.append(f"  - Duplicate Rate: {dr:.2f}% (target: ≤{dr_target:.0f}%) {dr_status}")

                # Timeliness
                tl = metrics['timeliness_90_days']
                tl_target = targets['timeliness_days']
                tl_status = "✓ PASS" if metrics['timeliness_meets_target'] else "✗ FAIL"
                output.append(f"  - Timeliness (90%): {tl:.2f} days (target: ≤{tl_target} day(s)) {tl_status}")

                output.append("")

                # Overall Assessment
                if metrics['all_targets_met']:
                    output.append("Overall Assessment: ✓ ALL TARGETS MET")
                else:
                    failed_metrics = []
                    if not metrics['success_rate_meets_target']:
                        failed_metrics.append("Success Rate")
                    if not metrics['miss_rate_meets_target']:
                        failed_metrics.append("Miss Rate")
                    if not metrics['duplicate_rate_meets_target']:
                        failed_metrics.append("Duplicate Rate")
                    if not metrics['timeliness_meets_target']:
                        failed_metrics.append("Timeliness")
                    output.append(f"Overall Assessment: ✗ FAILED ({', '.join(failed_metrics)})")

        output.append("")
        output.append("=" * 80)
        return "\n".join(output)
    
    def format_tasks_table(self, tasks, max_rows=None, sort_by_status=True, status_filter=None):
        """
        将bisect任务格式化为表格输出

        :param tasks: 任务列表
        :param max_rows: 最大显示行数，None表示显示所有
        :param sort_by_status: 是否按状态排序（优先级：wait > failed > success）
        :param status_filter: 状态筛选，None表示显示所有，可选值：'wait', 'failed', 'success', 'processing'
        :return: 格式化的表格字符串
        """
        if not tasks:
            return "No tasks to display"

        # 如果指定了状态筛选，先过滤
        if status_filter:
            filtered_tasks = [
                task for task in tasks
                if task.get('bisect_status', '').lower() == status_filter.lower()
            ]
            if not filtered_tasks:
                return f"No tasks found with status '{status_filter}'"
            tasks = filtered_tasks

        # 按状态排序（如果启用）
        if sort_by_status:
            # 定义状态优先级：wait/processing > failed > success > 其他
            status_priority = {
                'wait': 0,
                'processing': 0,
                'pending_verification': 1,
                'failed': 2,
                'success': 3
            }

            tasks = sorted(
                tasks,
                key=lambda x: (
                    status_priority.get(x.get('bisect_status', 'unknown').lower(), 999),
                    x.get('submit_time', 0)  # 相同状态按提交时间排序
                )
            )

        # 准备表格数据
        headers = [
            'ID', 'Bad Job ID', 'Error ID', 'First Bad Commit',
            'Start Time', 'End Time', 'Duration (h)', 'Result'
        ]

        table_data = []
        display_tasks = tasks[:max_rows] if max_rows else tasks

        for task in display_tasks:
            # 获取 error_id（从顶层字段） - 完整显示，不截断
            error_id = task.get('error_id', 'N/A')
            if not error_id or error_id == '':
                error_id = 'N/A'

            # 获取 start_time 和 end_time - 处理可能是字符串的情况
            start_time = 'N/A'
            end_time = 'N/A'
            start_timestamp = task.get('start_time', 0)
            end_timestamp = task.get('end_time', 0)

            try:
                start_timestamp = int(start_timestamp) if start_timestamp else 0
                if start_timestamp > 0:
                    start_time = datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d %H:%M:%S')
            except (ValueError, TypeError, OSError):
                start_timestamp = 0
                start_time = 'N/A'

            try:
                end_timestamp = int(end_timestamp) if end_timestamp else 0
                if end_timestamp > 0:
                    end_time = datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d %H:%M:%S')
            except (ValueError, TypeError, OSError):
                end_timestamp = 0
                end_time = 'N/A'

            # 计算耗时（小时）
            duration_hours = 'N/A'
            if start_timestamp and start_timestamp > 0 and end_timestamp and end_timestamp > 0:
                duration_seconds = end_timestamp - start_timestamp
                duration_hours = f"{duration_seconds / 3600:.2f}"

            # 格式化提交哈希（缩短显示）
            first_bad_commit = task.get('first_bad_commit', 'N/A')
            if not first_bad_commit or first_bad_commit == '':
                first_bad_commit = 'N/A'
            elif len(first_bad_commit) > 12:
                first_bad_commit = first_bad_commit[:12]

            # 格式化结果状态
            result = task.get('bisect_status', 'unknown')

            row = [
                str(task.get('id', 'N/A')),
                str(task.get('bad_job_id', 'N/A')),
                str(error_id),
                str(first_bad_commit),
                str(start_time),
                str(end_time),
                str(duration_hours),
                str(result)
            ]
            table_data.append(row)

        # 使用 tabulate 或者自定义格式化
        if HAS_TABULATE:
            table_str = tabulate(table_data, headers=headers, tablefmt='grid')
        else:
            # 简单的表格格式化（如果没有tabulate库）
            table_str = self._format_simple_table(headers, table_data)

        # 添加统计信息
        status_counts = {}
        for task in tasks:
            status = task.get('bisect_status', 'unknown')
            status_counts[status] = status_counts.get(status, 0) + 1

        status_summary = ", ".join([f"{status}: {count}" for status, count in sorted(status_counts.items())])

        summary = f"\nShowing {len(display_tasks)} of {len(tasks)} tasks"
        if status_filter:
            summary += f" (filtered by status: {status_filter})"
        if max_rows and len(tasks) > max_rows:
            summary += f"\nUse --table-rows to show more"
        summary += f"\nStatus distribution: {status_summary}"

        return table_str + "\n" + summary

    def _format_simple_table(self, headers, rows):
        """简单的表格格式化器（当tabulate不可用时）"""
        # 计算每列的最大宽度
        col_widths = [len(h) for h in headers]

        for row in rows:
            for i, cell in enumerate(row):
                col_widths[i] = max(col_widths[i], len(str(cell)))

        # 创建分隔线
        separator = '+' + '+'.join('-' * (w + 2) for w in col_widths) + '+'

        # 格式化表头
        header_row = '|' + '|'.join(f' {h:<{col_widths[i]}} ' for i, h in enumerate(headers)) + '|'

        # 格式化数据行
        data_rows = []
        for row in rows:
            data_row = '|' + '|'.join(f' {str(cell):<{col_widths[i]}} ' for i, cell in enumerate(row)) + '|'
            data_rows.append(data_row)

        # 组合表格
        table = [separator, header_row, separator]
        table.extend(data_rows)
        table.append(separator)

        return '\n'.join(table)

    def export_to_csv(self, tasks, filename):
        """Export task results to a CSV file.

        :param tasks: list of task dicts (as returned by query_bisect_tasks)
        :param filename: destination CSV file path
        :return: True on success; False when there is nothing to export
                 or when writing fails
        """
        if not tasks:
            logger.warning("No tasks to export")
            return False

        fieldnames = [
            'id', 'bad_job_id', 'status', 'start_time', 'end_time',
            'first_bad_commit', 'head_check_status', 'head_check_commit',
            'head_check_job_id', 'regressed_errids'
        ]

        try:
            with open(filename, 'w', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()

                for task in tasks:
                    # The 'j' field may carry the HEAD test info as a JSON
                    # string; fall back to an empty dict on any parse failure.
                    j_data = task.get('j', {})
                    if isinstance(j_data, str):
                        try:
                            j_data = json.loads(j_data)
                        except (ValueError, TypeError):
                            j_data = {}
                    if not isinstance(j_data, dict):
                        j_data = {}

                    # regressed_errids should be a list; tolerate a bare string
                    # so ','.join() does not splice it character by character.
                    errids = j_data.get('regressed_errids') or []
                    if isinstance(errids, str):
                        errids = [errids]

                    # .get() with defaults keeps the export going even when an
                    # individual record is missing a field.
                    writer.writerow({
                        'id': task.get('id', ''),
                        'bad_job_id': task.get('bad_job_id', ''),
                        'status': task.get('bisect_status', ''),
                        'start_time': task.get('start_time', ''),
                        'end_time': task.get('end_time', ''),
                        'first_bad_commit': task.get('first_bad_commit', ''),
                        'head_check_status': j_data.get('head_check_status', ''),
                        'head_check_commit': j_data.get('head_check_commit', ''),
                        'head_check_job_id': j_data.get('head_check_job_id', ''),
                        'regressed_errids': ','.join(str(e) for e in errids)
                    })

            # Report the actual destination path (was a hard-coded placeholder).
            logger.info(f"Results exported to: {filename}")
            return True
        except Exception as e:
            logger.error(f"Export failed: {str(e)}")
            return False

def main():
    """Command-line entry point: parse arguments, query bisect tasks, and
    print either the failed-task clustering, the task table, or the full
    metric analysis (optionally exporting to CSV)."""
    parser = argparse.ArgumentParser(
        description='Bisect Task Batch Analysis Tool',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    # Query parameters
    parser.add_argument('--hours', type=int, default=24,
                        help='Query tasks within the last N hours')
    parser.add_argument('--bisect-status', dest='status',
                        help='Filter tasks by status (e.g., success, failure, pending)')
    parser.add_argument('--limit', type=int, default=1000,
                        help='Maximum number of results to return')

    # Output options
    parser.add_argument('--export-csv',
                        help='Export results to CSV file')
    parser.add_argument('--debug', action='store_true',
                        help='Enable debug mode')
    parser.add_argument('--sample-data', action='store_true',
                        help='Use sample data for demonstration (when database has no data)')
    parser.add_argument('--show-failed', action='store_true',
                        help='Show failed task analysis, clustered by error type')
    parser.add_argument('--show-table', action='store_true',
                        help='Display bisect task details in table format')
    parser.add_argument('--table-rows', type=int, default=200,
                        help='Maximum number of rows to display in table (default 200)')
    parser.add_argument('--table-status', dest='table_status_filter',
                        choices=['wait', 'processing', 'pending_verification', 'failed', 'success'],
                        help='Filter table by status (choices: wait, processing, pending_verification, failed, success)')
    parser.add_argument('--no-sort', action='store_true',
                        help='Disable sorting by status (default sort order: wait > failed > success)')

    args = parser.parse_args()

    if args.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug("Debug mode enabled")

    analyzer = BisectAnalysis()

    # Mode 1: failed-task clustering is a standalone report; print and exit.
    if args.show_failed:
        logger.info("Querying failed bisect tasks for error clustering analysis...")
        failed_tasks = analyzer.query_bisect_tasks(
            hours=args.hours,
            status=None,  # Do not use status parameter
            limit=args.limit,
            include_failed=True  # Only query failed tasks
        )
        if not failed_tasks:
            logger.info("No failed bisect tasks found")
            return
        logger.info(f"Found {len(failed_tasks)} failed bisect tasks")
        failed_analysis = analyzer.analyze_failed_tasks(failed_tasks)
        if failed_analysis:
            print(analyzer.format_failed_task_analysis(failed_analysis))
        else:
            print("No failed tasks to analyze")
        return

    # Fetch the working task set: demo fixtures or a real database query.
    if args.sample_data:
        logger.info("Using sample data to demonstrate HEAD test check functionality")
        tasks = analyzer.get_sample_tasks()
    else:
        tasks = analyzer.query_bisect_tasks(
            hours=args.hours,
            status=args.status,
            limit=args.limit
        )

    if not tasks:
        # Nothing matched — walk the user through the likely causes.
        logger.info("No matching bisect tasks found")
        for hint in (
            "Possible reasons:",
            "1. No bisect task records in database",
            "2. Time range (--hours) set too short",
            "3. Status filter (--bisect-status) too restrictive",
            "4. Database table structure mismatch",
            "Suggestions:",
            "a. Check if py_bisect.py successfully recorded tasks",
            "b. Query database directly: SELECT * FROM bisect LIMIT 1",
            "c. Use --sample-data parameter to view sample data demonstration",
        ):
            logger.info(hint)
        return

    logger.info(f"Found {len(tasks)} bisect tasks")

    # Mode 2: table view prints the formatted table (plus optional CSV) and exits.
    if args.show_table:
        banner = "=" * 120
        print("\n" + banner)
        print("Bisect Tasks Table")
        print(banner)
        print(analyzer.format_tasks_table(
            tasks,
            max_rows=args.table_rows,
            sort_by_status=not args.no_sort,
            status_filter=args.table_status_filter
        ))
        if args.export_csv:
            analyzer.export_to_csv(tasks, args.export_csv)
        return

    # Mode 3 (default): full metric analysis.
    analysis = analyzer.analyze_tasks(tasks)

    if tasks:
        # Miss rate reuses the same time window as the bisect task query.
        miss_rate, miss_details = analyzer.calculate_miss_rate_enhanced(
            tasks, time_window_hours=args.hours)
        success_rate, success_details = analyzer.calculate_success_rate_enhanced(tasks)
        timeliness_90, timeliness_details = analyzer.calculate_timeliness_enhanced(tasks)
        analysis.update({
            'miss_rate': miss_rate,
            'miss_details': miss_details,
            'success_rate_enhanced': success_rate,
            'success_details': success_details,
            'timeliness_90': timeliness_90,
            'timeliness_details': timeliness_details,
            'duplicate_by_category': analyzer.calculate_duplicate_rate_by_category(tasks),
            # Metrics broken down by category (Build vs Functional).
            'category_metrics': analyzer.calculate_metrics_by_category(
                tasks, time_window_hours=args.hours),
        })
    else:
        # Defensive defaults (tasks is non-empty here, but keep the shape stable).
        analysis.update({
            'miss_rate': 0,
            'miss_details': {},
            'success_rate_enhanced': 0,
            'success_details': {},
            'timeliness_90': 0,
            'timeliness_details': {},
            'duplicate_by_category': {},
            'category_metrics': {},
        })

    # Format output
    print(analyzer.format_analysis(analysis, args.hours))

    # Export results
    if args.export_csv:
        analyzer.export_to_csv(tasks, args.export_csv)
# Run the CLI entry point only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
