#!/usr/bin/env python3
"""
使用Python处理kernel分析，避免shell CSV处理问题
"""
import csv
import subprocess
import sys
import re
import os
import argparse
import yaml
import json
from pathlib import Path
from collections import Counter
from datetime import datetime, timedelta
import difflib

# Use the LKP_SRC environment variable to robustly locate the bisect-py modules.
lkp_src_env = os.environ.get('LKP_SRC')
if lkp_src_env:
    bisect_py_path = Path(lkp_src_env) / 'programs' / 'bisect-py'
else:
    # If LKP_SRC is unset, infer the path from this file's location:
    # the script lives in .../bisect-py/dataset_validator/, the modules in .../bisect-py/.
    bisect_py_path = Path(__file__).resolve().parent.parent

# Prepend (idempotently) so the project modules below are importable.
if str(bisect_py_path) not in sys.path:
    sys.path.insert(0, str(bisect_py_path))

from py_bisect import GitBisect
from bisect_log_config import logger

def get_job_health(job_id):
    """Return the job_health field for *job_id* queried via the `cci` CLI.

    Returns "UNKNOWN" when the job cannot be queried (command missing,
    non-zero exit status, timeout, or no data row in the output).
    """
    try:
        cmd = ['cci', 'jobs', '-f', 'job_health', '--manticore', f'id={job_id}']
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
        if result.returncode == 0:
            lines = result.stdout.strip().split('\n')
            # The first output line appears to be a header; the value is on line 2.
            if len(lines) > 1:
                return lines[1].strip()
        return "UNKNOWN"
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        return "UNKNOWN"

def get_job_errid(job_id):
    """Return the (possibly space-separated) errid field for *job_id* via `cci`.

    Returns "" when the job cannot be queried (command missing, non-zero
    exit status, timeout, or no data row in the output).
    """
    try:
        cmd = ['cci', 'jobs', '-f', 'errid', '--manticore', f'id={job_id}']
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
        if result.returncode == 0:
            lines = result.stdout.strip().split('\n')
            # The first output line appears to be a header; the value is on line 2.
            if len(lines) > 1:
                return lines[1].strip()
        return ""
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        return ""

def get_job_result_root(job_id):
    """Query the result directory of *job_id* via the `cci` CLI.

    Returns "" when the job cannot be queried or yields no data row.
    """
    try:
        proc = subprocess.run(
            ['cci', 'jobs', '-f', 'result_root', '--manticore', f'id={job_id}'],
            capture_output=True, text=True, timeout=30)
        if proc.returncode == 0:
            output_lines = proc.stdout.strip().split('\n')
            if len(output_lines) > 1:
                result_dir = output_lines[1].strip()
                logger.debug(f"Job {job_id} 结果目录: {result_dir}")
                return result_dir
    except Exception:
        pass
    return ""

def get_job_times(job_id):
    """Return (submit_time, finish_time) unix timestamps for *job_id* via `cci`.

    Either value falls back to 0 when the field is missing or non-numeric;
    (0, 0) is returned on any query failure.
    """
    try:
        cmd = ['cci', 'jobs', '-f', 'submit_time,finish_time', '--manticore', f'id={job_id}']
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
        if result.returncode == 0:
            lines = result.stdout.strip().split('\n')
            if len(lines) > 1:
                parts = lines[1].strip().split()
                # Guard len(parts) explicitly: a blank data row previously raised
                # IndexError on parts[0] and was silently swallowed by the except.
                submit_time = int(parts[0]) if len(parts) > 0 and parts[0].isdigit() else 0
                finish_time = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else 0
                return submit_time, finish_time
        return 0, 0
    except Exception:
        return 0, 0

def _search_logs_for_patterns(result_root, patterns):
    """
    在指定目录的 .log 文件中搜索多个模式。
    会完整遍历文件以找出所有匹配的模式，并记录下来。
    只要找到任何一个模式，最终就返回 True。
    """
    found_patterns = set()
    patterns_to_find = set(patterns)

    for dirpath, _, filenames in os.walk(result_root):
        for filename in filenames:
            if filename.endswith(".log"):
                file_path = os.path.join(dirpath, filename)
                try:
                    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        for line in f:
                            # 使用 set 的差集操作来高效地查找尚未找到的模式
                            remaining_patterns = patterns_to_find - found_patterns
                            for pattern in remaining_patterns:
                                if pattern in line:
                                    logger.debug(f"在 {file_path} 中找到模式 '{pattern}'")
                                    found_patterns.add(pattern)
                            # 如果所有模式都已找到，可以提前退出以优化性能
                            if found_patterns == patterns_to_find:
                                return True, list(found_patterns)
                except Exception as e:
                    logger.warning(f"读取日志文件 {file_path} 时出错: {e}")
    
    # 只要找到了任何一个模式，就认为成功
    if found_patterns:
        return True, list(found_patterns)
    
    return False, []

def check_error_reproduction(job_id, core_error):
    """Decide whether *core_error* reproduced in the job's result logs.

    "unmet dependencies" warnings are split into two substrings that are
    OR-matched; any other error is matched verbatim against the logs.

    Returns a (status, detail) tuple where status is "是", "否" or
    "无法检查" and detail names the matched pattern(s) or the reason.
    """
    if not core_error:
        logger.warning(f"Job {job_id}: core_error 为空，无法检查复现")
        return "无法检查", "core_error为空"

    result_root = get_job_result_root(job_id)
    if not result_root:
        logger.warning(f"Job {job_id}: 无法获取结果目录")
        return "无法检查", "无法获取结果目录"

    if not os.path.exists(result_root):
        logger.warning(f"Job {job_id}: 结果目录不存在: {result_root}")
        return "否", "结果目录不存在"

    unmet = re.match(
        r"WARNING: unmet direct dependencies detected for (\w+) when selected by (\w+)",
        core_error, re.IGNORECASE)
    if unmet:
        # Split into two independently matchable substrings.
        search_terms = [
            f"unmet direct dependencies detected for {unmet.group(1)}",
            unmet.group(2),
        ]
        logger.debug(f"Job {job_id}: 拆分搜索模式: {search_terms}")
    else:
        search_terms = [core_error]
        logger.debug(f"Job {job_id}: 使用单一搜索模式: '{core_error}'")

    matched, matched_terms = _search_logs_for_patterns(result_root, search_terms)
    if matched:
        # A single hit is enough to count as reproduced.
        return "是", "; ".join(matched_terms)
    return "否", "未找到匹配"

def build_pkg_error_id(line: str) -> str:
    """Normalize a build-log line into a canonical error id.

    Port of the ruby build_pkg_error_id function: strips volatile tokens
    (versions, dates, hex literals, SHA-1s, numbers, temp paths) and
    collapses whitespace/punctuation so identical failures map to the
    same id. The order of the substitutions below is significant and
    mirrors the ruby implementation.
    """
    text = line.rstrip('\r\n')
    # ANSI escape sequences
    text = re.sub(r'\x1b\[([0-9]{1,2}(;[0-9]{1,2})?)?[mK]', '', text)
    # version-like tokens, e.g. 5.10-rc1
    text = re.sub(r'\b[3-9]\.[0-9]+[-a-z0-9.]+', '#', text)
    # dates such as 12-Jan-2024
    text = re.sub(r'\b[1-9][0-9]-[A-Z][a-z]+-[0-9]{4}\b', '#', text)
    # hex literals
    text = re.sub(r'\b0x[0-9a-f]+\b', '#', text, flags=re.IGNORECASE)
    # 40-character SHA-1 hashes
    text = re.sub(r'\b[a-f0-9]{40}\b', '#', text, flags=re.IGNORECASE)
    # plain numbers
    text = re.sub(r'\b[0-9][0-9.]*', '#', text)
    # undo the accidental '#x' produced from '0x' by the number rule
    text = re.sub(r'#x\b', '0x', text)
    # quoting / shell metacharacters
    text = re.sub(r'[\\"\$]', '~', text)
    # whitespace normalization
    text = re.sub(r'[ \t]', ' ', text)
    text = re.sub(r' {2,}', ' ', text)
    # drop spaces adjacent to punctuation
    text = re.sub(r'([^a-zA-Z0-9]) ', r'\1', text)
    text = re.sub(r' ([^a-zA-Z0-9])', r'\1', text)
    # leading spaces first, then leading dashes (the order matters)
    text = text.lstrip(' ')
    text = text.lstrip('-')
    # special-case underscore cleanup
    text = re.sub(r'  _', '_', text)
    # remaining spaces become dashes
    text = text.replace(' ', '-')
    # trailing punctuation
    text = re.sub(r'[-_.,;:#\[\]()]+$', '', text)
    # squeeze long punctuation runs into ':'
    text = re.sub(r'([-_.,;:#!]){3,}', ':', text)
    # strip /tmp scratch-path prefixes
    text = re.sub(r'^/tmp/(?!lkp).*?/', '', text)
    text = re.sub(r'^/tmp/lkp/\w+\.(?=(\w+\.){2}\w+:)', '', text)

    return text

def _comparable_errid(actual_errid):
    """Strip database decorations from an errid so it can be compared.

    Removes an optional 'makepkg.eid.'/'eid.' prefix, leading '=>'
    markers, and a trailing ':001' occurrence counter.
    """
    cleaned = re.sub(r'^(makepkg\.eid\.|eid\.)?(=>)*', '', actual_errid)
    if cleaned.endswith(':001'):
        cleaned = cleaned[:-4]
    return cleaned


def check_errid_correctness(job_id, core_error):
    """Check whether the job's recorded errid matches *core_error*.

    Uses the errid-generation logic ported from makepkg/parse
    (build_pkg_error_id) and tries, in order: exact match, substring
    match, reverse substring match, then a difflib similarity match with
    a 60% threshold. "unmet dependencies" errors only use exact/substring
    matching on a shortened form of the warning.

    Returns ("是" | "否" | "无法检查", matched_errid_or_"").
    """
    if not core_error:
        logger.warning(f"Job {job_id}: core_error 为空，无法检查 errid")
        return "无法检查", ""

    actual_errid_str = get_job_errid(job_id)
    if not actual_errid_str:
        logger.warning(f"Job {job_id}: 无法获取 errid")
        return "无法检查", ""

    # cci may return several space-separated errids; check each one.
    actual_errids = actual_errid_str.split()

    # Unmet-dependencies warnings use partial matching on the shortened form.
    unmet_match = re.match(r"WARNING: unmet direct dependencies detected for (\w+)", core_error, re.IGNORECASE)
    if unmet_match:
        # Drop the "when selected by ..." tail before generating the id.
        partial_core_error = f"WARNING: unmet direct dependencies detected for {unmet_match.group(1)}"
        generated_errid = build_pkg_error_id(partial_core_error)

        for actual_errid in actual_errids:
            comparable_errid = _comparable_errid(actual_errid)
            # Exact match (after stripping decorations)
            if generated_errid == comparable_errid:
                logger.debug(f"Job {job_id}: unmet-dependencies errid 精确匹配成功 ('{generated_errid}' == '{comparable_errid}')")
                return "是", actual_errid
            # Fall back to substring matching
            elif generated_errid in comparable_errid:
                logger.debug(f"Job {job_id}: unmet-dependencies errid 部分匹配成功 ('{generated_errid}' in '{comparable_errid}')")
                return "是", actual_errid
    else:
        generated_errid = build_pkg_error_id(core_error)

        # Pass 1: exact match.
        for actual_errid in actual_errids:
            comparable_errid = _comparable_errid(actual_errid)
            if generated_errid == comparable_errid:
                logger.debug(f"Job {job_id}: errid 精确匹配成功 ('{generated_errid}')")
                return "是", actual_errid

        # Pass 2: generated errid contained in the recorded errid.
        for actual_errid in actual_errids:
            comparable_errid = _comparable_errid(actual_errid)
            if generated_errid in comparable_errid:
                logger.debug(f"Job {job_id}: errid 部分匹配成功 ('{generated_errid}' in '{comparable_errid}')")
                return "是", actual_errid

        # Pass 3: recorded errid contained in the generated one —
        # covers the case where core_error carries extra detail.
        for actual_errid in actual_errids:
            comparable_errid = _comparable_errid(actual_errid)
            if comparable_errid in generated_errid:
                logger.debug(f"Job {job_id}: errid 反向部分匹配成功 ('{comparable_errid}' in '{generated_errid}')")
                return "是", actual_errid

        # Pass 4: similarity matching (threshold 60%), keep the best hit.
        similarity_threshold = 0.6
        best_similarity = 0.0
        best_match = ""

        for actual_errid in actual_errids:
            comparable_errid = _comparable_errid(actual_errid)
            similarity = calculate_similarity_score(generated_errid, comparable_errid)
            if similarity >= similarity_threshold and similarity > best_similarity:
                best_similarity = similarity
                best_match = actual_errid

        if best_match:
            logger.debug(f"Job {job_id}: errid 相似度匹配成功 ('{generated_errid}' 与 '{best_match}' 相似度: {best_similarity:.2%})")
            return "是", best_match

    # All strategies failed: log (recorded errids truncated) and report mismatch.
    log_actual_errid = (actual_errid_str[:150] + '...') if len(actual_errid_str) > 150 else actual_errid_str
    logger.debug(f"Job {job_id}: errid 不匹配。预期: '{generated_errid}', 实际(截断): '{log_actual_errid}'")
    return "否", ""


def apply_overrides(template, overrides):
    """
    Apply overrides to or delete keys from the job template.
    A simplified version inspired by linux_auto_test.py.
    - To override/add: 'key.path=value'
    - To delete: 'key.path='

    :param template: nested dict job template (mutated in place).
    :param overrides: iterable of 'key.path=value' strings.
    :return: the same template object, for chaining.
    """
    for override in overrides:
        if '=' not in override:
            logger.warning(f"Invalid override format: {override}")
            continue

        path, value = override.split('=', 1)
        keys = path.split('.')
        try:
            # --- Deletion Logic (empty value means "delete this key") ---
            if not value:
                parent = template
                # Traverse to the parent dictionary
                for key in keys[:-1]:
                    # If any part of the path doesn't exist, we can't delete.
                    if key not in parent or not isinstance(parent.get(key), dict):
                        parent = None
                        break
                    parent = parent[key]

                # If path is valid and key exists, delete it.
                # Use `is not None`: an empty-but-valid parent dict is falsy.
                if parent is not None and keys[-1] in parent:
                    del parent[keys[-1]]
                    logger.debug(f"Deleted field: {path}")
                else:
                    logger.warning(f"Cannot delete '{path}': path or key not found.")
                continue

            # --- Override/Add Logic: create intermediate dicts as needed ---
            current = template
            for key in keys[:-1]:
                if key not in current or not isinstance(current.get(key), dict):
                    current[key] = {}
                current = current[key]

            try:
                # Let YAML coerce scalar values (ints, bools, lists, ...).
                parsed_value = yaml.safe_load(value)
                current[keys[-1]] = parsed_value
            except Exception:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit); unparsable values are
                # stored as the raw string.
                current[keys[-1]] = value
            logger.debug(f"Overridden field: {path} = {current[keys[-1]]}")
        except Exception as e:
            logger.error(f"Failed to apply override '{path}={value}': {e}")
    return template

def resubmit_jobs_from_csv(csv_file, output_csv, force_resubmit=False, extra_args=None):
    """Resubmit every job listed in *csv_file* and rewrite the CSV in place.

    Reads job IDs from the 'job_id' column, resubmits each job via
    GitBisect, then overwrites the row's job_id/result_root with the new
    values and clears stale analysis columns. If a column named 'W=1'
    holds '是'/'y'/'yes', the override program.makepkg.extra_args="W=1"
    is appended for that row.

    :param csv_file: input CSV path (expects a 'job_id' column).
    :param output_csv: path the updated rows are written to.
    :param force_resubmit: passed through to GitBisect.submit_job(force=...).
    :param extra_args: optional list of 'key.path=value' overrides applied
        to every resubmitted job.
    :return: True once the output CSV has been written.
    """
    gb = GitBisect()

    with open(csv_file, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        rows = list(reader)

    # --- Resolve (or create) the indices of the key columns ---
    def get_or_add_idx(col_name):
        # Return the column's index, appending the column when absent.
        try:
            return header.index(col_name)
        except ValueError:
            header.append(col_name)
            return len(header) - 1

    job_id_idx = get_or_add_idx('job_id')
    result_root_idx = get_or_add_idx('result_root')

    # Index of the 'W=1' column, or None when the column is absent
    w1_col_idx = header.index('W=1') if 'W=1' in header else None

    # Analysis columns that must be cleared after a resubmit (new CSV format)
    analysis_cols = ["is_reproducible", "search_keywords"]
    analysis_indices = {col: get_or_add_idx(col) for col in analysis_cols}

    results = []
    for i, row in enumerate(rows):
        # Pad the row so its length matches the header
        while len(row) < len(header):
            row.append("")
            
        old_job_id = row[job_id_idx].strip()
        if not old_job_id:
            logger.warning(f"第{i+2}行job_id为空或缺失，跳过")
            results.append(row)
            continue
        
        logger.info(f"处理第{i+2}行: 原job_id={old_job_id}")
        
        try:
            original_job = gb.init_job_content(old_job_id)
            if not original_job:
                raise ValueError("无法获取原始作业信息")

            # --- Build the per-row extra_args ---
            row_specific_args = list(extra_args) if extra_args else []
            if w1_col_idx is not None and len(row) > w1_col_idx:
                w1_value = row[w1_col_idx].strip().lower()
                if w1_value in ['是', 'y', 'yes']:
                    w1_arg = 'program.makepkg.extra_args="W=1"'
                    logger.info(f"检测到 W=1 列为 '{row[w1_col_idx]}', 添加参数: {w1_arg}")
                    row_specific_args.append(w1_arg)

            if row_specific_args:
                logger.info(f"应用最终参数: {row_specific_args}")
                original_job = apply_overrides(original_job, row_specific_args)
            
            new_job_info = gb.submit_job(original_job, force=force_resubmit)
            if new_job_info:
                new_job_id, new_result_root = new_job_info
                logger.info(f"成功重新提交作业: 原{old_job_id} -> 新{new_job_id}")
                
                # --- Overwrite the row in place ---
                row[job_id_idx] = new_job_id
                row[result_root_idx] = new_result_root
                
                # Clear the stale analysis results
                for col, idx in analysis_indices.items():
                    row[idx] = ""
            else:
                raise ValueError("重新提交作业失败")
            
        except Exception as e:
            logger.error(f"处理作业{old_job_id}时发生异常: {e}")
        
        results.append(row)
    
    with open(output_csv, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(results)
    
    logger.info(f"重新提交完成! 结果已覆盖式写入到 {output_csv}")
    return True

def format_duration(seconds):
    """Render a duration in seconds as H:MM:SS; negative input yields "N/A"."""
    return "N/A" if seconds < 0 else str(timedelta(seconds=int(seconds)))

def analyze_csv_stats(csv_file):
    """Print a statistics report for a jobs CSV to stdout.

    Summarizes the job_health / is_reproducible / errid_correct columns
    and queries each job's submit/finish timestamps via `cci`
    (get_job_times) to report duration statistics and still-unfinished
    jobs. Returns None.
    """
    try:
        with open(csv_file, 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            rows = list(reader)
            header = reader.fieldnames or []
    except Exception as e:
        logger.error(f"读取CSV文件时出错: {e}")
        return

    total_rows = len(rows)
    if total_rows == 0:
        print("CSV文件为空，无数据可统计。\n")
        return

    # --- Static column statistics ---
    def get_value(row, key):
        # Normalize a cell into a category label for counting.
        val = row.get(key)
        if val is None:
            return '[Missing Column]'
        val = val.strip()
        if val == '':
            return '[Empty]'
        if key == 'job_health' and val == 'UNKNOWN':
            return '未完成'
        return val

    health_counts = Counter(get_value(row, 'job_health') for row in rows)
    reproducible_counts = Counter(get_value(row, 'is_reproducible') for row in rows)
    errid_correct_counts = Counter(get_value(row, 'errid_correct') for row in rows)

    # --- Per-job duration statistics (one cci query per job) ---
    durations = []
    incomplete_jobs = []
    print("正在查询每个作业的耗时信息，请稍候...")
    for i, row in enumerate(rows):
        job_id = row.get('job_id', '').strip()
        if not job_id:
            continue
        
        print(f"\r查询进度: {i+1}/{total_rows}", end="")
        submit_time, finish_time = get_job_times(job_id)
        
        if finish_time > 0 and submit_time > 0:
            durations.append((finish_time - submit_time, job_id))
        elif submit_time > 0:
            # Submitted but not finished yet — reported separately below.
            submit_time_str = datetime.fromtimestamp(submit_time).strftime('%Y-%m-%d %H:%M:%S')
            incomplete_jobs.append((job_id, submit_time_str))
    print("\n查询完成。\n")

    # --- Print the report ---
    print("\n--- CSV 数据统计报告 ---")
    print(f"文件: {os.path.basename(csv_file)}")
    print(f"总行数: {total_rows}\n")

    print("Job Health 统计:")
    if 'job_health' not in header:
        print("  - 'job_health' 列不存在。\n")
    else:
        for status, count in health_counts.most_common():
            percentage = (count / total_rows) * 100
            print(f"  - {status:<20}: {count:>4} 行 ({percentage:.2f}%)")
    
    print("\n是否复现 统计:")
    if 'is_reproducible' not in header:
        print("  - 'is_reproducible' 列不存在。\n")
    else:
        for status, count in reproducible_counts.most_common():
            percentage = (count / total_rows) * 100
            print(f"  - {status:<20}: {count:>4} 行 ({percentage:.2f}%)")

    print("\nERRID 是否正确捕获 统计:")
    if 'errid_correct' not in header:
        print("  - 'errid_correct' 列不存在。\n")
    else:
        for status, count in errid_correct_counts.most_common():
            percentage = (count / total_rows) * 100
            print(f"  - {status:<20}: {count:>4} 行 ({percentage:.2f}%)")

    print("\n任务耗时统计:")
    if durations:
        total_duration_seconds = sum(d[0] for d in durations)
        avg_duration = total_duration_seconds / len(durations)
        max_duration_tuple = max(durations, key=lambda item: item[0])
        min_duration_tuple = min(durations, key=lambda item: item[0])
        
        print(f"  - 已完成任务数      : {len(durations)}")
        print(f"  - 平均耗时          : {format_duration(avg_duration)}")
        print(f"  - 最高耗时          : {format_duration(max_duration_tuple[0])} (Job ID: {max_duration_tuple[1]})")
        print(f"  - 最低耗时          : {format_duration(min_duration_tuple[0])} (Job ID: {min_duration_tuple[1]})")
    else:
        print("  - 没有已完成的任务可供统计耗时。\n")

    if incomplete_jobs:
        print("\n未完成的任务:")
        for job_id, submit_time_str in incomplete_jobs:
            print(f"  - Job ID: {job_id}, 提交时间: {submit_time_str}")
    
    # Bug fix: the original printed a literal backslash-n ("\\n"); use a
    # real newline, consistent with every other print in this report.
    print("\n--- 报告结束 ---\n")

def create_and_submit_makepkg_jobs_from_csv(csv_file, output_csv, force_submit=False, testbox='dc-8g', default_git_url=None):
    """
    Read head, config and mail_link columns from a CSV, build makepkg job
    templates and submit them via GitBisect. The resulting job_id and
    result_root are written back into the CSV.

    :param csv_file: input CSV; must contain 'head', 'config' and 'mail_link' columns.
    :param output_csv: path the updated CSV is written to.
    :param force_submit: also resubmit rows that already carry a job_id.
    :param testbox: testbox name placed into the job template.
    :param default_git_url: if given, takes priority over any URL inferred
        from mail_link.
    :return: True when the output CSV was written, False on missing columns.
    """
    gb = GitBisect()

    with open(csv_file, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        rows = list(reader)

    # --- Resolve (or create) the indices of the key columns ---
    def get_or_add_idx(col_name):
        # Return the column's index, appending the column when absent.
        try:
            return header.index(col_name)
        except ValueError:
            header.append(col_name)
            return len(header) - 1

    def get_idx(name):
        # Return the index of a required column, or None when it is missing.
        try:
            return header.index(name)
        except ValueError:
            logger.error(f"CSV文件中缺少必需的列: '{name}'\n")
            return None

    # Indices of the required columns
    head_idx = get_idx('head')
    config_idx = get_idx('config')
    mail_link_idx = get_idx('mail_link')
    job_id_idx = get_or_add_idx('job_id')
    result_root_idx = get_or_add_idx('result_root')

    # Index of the 'W=1' column, or None when the column is absent
    w1_col_idx = header.index('W=1') if 'W=1' in header else None

    if head_idx is None or config_idx is None or mail_link_idx is None:
        logger.error("CSV文件缺少必需的列 (head, config, mail_link)")
        return False

    results = []
    created_jobs = 0
    skipped_jobs = 0

    for i, row in enumerate(rows):
        # Pad the row so its length matches the header
        while len(row) < len(header):
            row.append("")

        head_commit = row[head_idx].strip() if len(row) > head_idx else ""
        config = row[config_idx].strip() if len(row) > config_idx else ""
        mail_link = row[mail_link_idx].strip() if len(row) > mail_link_idx else ""
        existing_job_id = row[job_id_idx].strip() if len(row) > job_id_idx else ""

        # Read the W=1 column
        w1_value = ""
        if w1_col_idx is not None and len(row) > w1_col_idx:
            w1_value = row[w1_col_idx].strip()

        # Skip rows that already have a job_id unless force_submit is set
        if existing_job_id and not force_submit:
            logger.info(f"第{i+2}行已有job_id={existing_job_id}，跳过")
            skipped_jobs += 1
            results.append(row)
            continue

        # The required fields must be present
        if not head_commit or not config:
            logger.warning(f"第{i+2}行缺少head或config信息，跳过")
            skipped_jobs += 1
            results.append(row)
            continue

        logger.info(f"处理第{i+2}行: head={head_commit}, config={config}")
        if w1_value:
            logger.info(f"  W=1列值: {w1_value}")

        try:
            # URL priority: 1. CLI argument 2. system default 3. inferred from mail_link (lowest)
            git_url = default_git_url or "git://172.168.131.113:9418/new-upstream/l/linux/openeuler-kernel.git"

            # When no URL was passed on the command line, try to infer one from mail_link
            if not default_git_url:
                inferred_url = extract_git_url_from_mail_link(mail_link)
                if inferred_url:
                    git_url = inferred_url
                    logger.info(f"从mail_link推断git URL: {git_url}")
                else:
                    logger.info(f"无法从mail_link推断git URL，使用系统默认: {git_url}")
            else:
                logger.info(f"使用命令行指定的git URL: {git_url}")

            # Build the job template
            job_template = {
                'suite': 'makepkg',
                'testbox': testbox,
                'program': {
                    'makepkg': {
                        'commit': head_commit,
                        'config': config,
                        '_url': git_url,
                        'project': 'linux'
                    }
                }
            }

            # Add extra_args when the W=1 column is truthy ('是', 'Y', 'yes', ...)
            if w1_value.lower() in ['是', 'y', 'yes', '1', 'true']:
                job_template['program']['makepkg']['extra_args'] = 'W=1'
                logger.info(f"  检测到 W=1={w1_value}，添加 extra_args='W=1'")

            # Submit the job
            new_job_info = gb.submit_job(job_template, force=force_submit)
            if new_job_info:
                new_job_id, new_result_root = new_job_info
                logger.info(f"成功创建任务: job_id={new_job_id}")

                # Write the new ids back into the row
                row[job_id_idx] = new_job_id
                row[result_root_idx] = new_result_root
                created_jobs += 1
            else:
                logger.error(f"创建任务失败")

        except Exception as e:
            logger.error(f"处理第{i+2}行时发生异常: {e}")

        results.append(row)

    # Write the results back out
    with open(output_csv, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(results)

    logger.info(f"任务创建完成! 创建了 {created_jobs} 个任务，跳过 {skipped_jobs} 个，结果已写入到 {output_csv}")
    return True

def extract_git_url_from_mail_link(mail_link):
    """Infer a git repository URL from a mailing-list link's domain.

    Returns None when the link is empty, unparsable, or its domain has
    no known mapping.
    """
    if not mail_link:
        return None

    try:
        from urllib.parse import urlparse
        host = urlparse(mail_link).netloc
    except Exception as e:
        logger.warning(f"解析邮件链接时出错: {e}")
        return None

    if 'openeuler.org' in host:
        return "git://172.168.131.113:9418/new-upstream/l/linux/openeuler-kernel.git"
    if 'kernel.org' in host:
        return "git+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git"
    # Extend this mapping as additional mailing-list domains appear.
    return None

def create_bisect_tasks_from_csv(csv_file):
    """
    Collect candidate bisect tasks (job_id, errid pairs) from a CSV, check
    whether each one already exists in the database via
    `bisect_api.py list_tasks`, and save the tasks that do not yet exist
    to a text file.

    Requires the CCI_SRC environment variable to point at the source tree
    so sbin/bisect_api.py can be located.
    """
    cci_src = os.environ.get('CCI_SRC')
    if not cci_src:
        logger.error("环境变量 CCI_SRC 未设置，无法找到 bisect_api.py。\n")
        return

    bisect_api_path = Path(cci_src) / 'sbin' / 'bisect_api.py'
    if not bisect_api_path.is_file():
        logger.error(f"bisect_api.py 不存在于: {bisect_api_path}\n")
        return

    try:
        with open(csv_file, 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            rows = list(reader)
    except Exception as e:
        logger.error(f"读取CSV文件时出错: {e}")
        return

    # Collect every (job_id, errid) combination that needs checking
    tasks_to_check = []
    seen_errids = set()
    job_count = 0
    max_tasks = 2000  # cap on the number of tasks collected

    for row in rows:
        job_id = row.get('job_id', '').strip()
        if not job_id:
            continue

        job_count += 1
        logger.info(f"处理Job {job_id} - 获取所有errid...")

        actual_errid_str = get_job_errid(job_id)
        if not actual_errid_str:
            logger.warning(f"Job {job_id}: 无法获取errid，跳过")
            continue

        actual_errids = actual_errid_str.split()
        logger.info(f"Job {job_id}: 发现 {len(actual_errids)} 个errid")

        for errid in actual_errids:
            if len(tasks_to_check) >= max_tasks:
                logger.info(f"已达到最大任务数量限制 {max_tasks}，停止收集")
                break
            clean_errid = errid.strip()
            if clean_errid and clean_errid not in seen_errids:
                seen_errids.add(clean_errid)
                tasks_to_check.append((job_id, clean_errid))
        if len(tasks_to_check) >= max_tasks:
            break

    # --- Save every unique errid that was collected ---
    collected_errids_filename = "collected_unique_errids.txt"
    try:
        with open(collected_errids_filename, 'w', encoding='utf-8') as f:
            for errid in sorted(list(seen_errids)):
                f.write(f"{errid}\n")
        print(f"\n已将收集到的 {len(seen_errids)} 个唯一 errid 保存到: {collected_errids_filename}")
    except IOError as e:
        logger.error(f"写入文件 {collected_errids_filename} 失败: {e}")

    if not tasks_to_check:
        print("在CSV中没有找到包含有效 job_id 和 errid 的行。\n")
        return

    # --- De-duplicate against the database and save the new tasks ---
    unique_jobs = len(set(task[0] for task in tasks_to_check))
    print(f"\n=== Bisect任务查重计划 ===")
    print(f"处理的job数量: {job_count}")
    print(f"有效的job数量: {unique_jobs}")
    print(f"总计要检查的bisect任务: {len(tasks_to_check)}")
    print(f"\n开始检查数据库中是否已存在这些任务...")

    new_tasks = []
    existing_tasks_count = 0

    for i, (job_id, error_id) in enumerate(tasks_to_check, 1):
        # Per user feedback, use 'list_tasks' with '--error_id' to test existence
        cmd = [
            'python3',
            str(bisect_api_path),
            'list_tasks',
            '--error_id', error_id
        ]
        # \r keeps the progress display on a single line
        print(f"\r[{i}/{len(tasks_to_check)}] 检查中: Job {job_id}, Errid {error_id[:30]}...", end="")
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)

            # Did the command itself succeed?
            if result.returncode != 0:
                # The command failed; log it — this may need manual follow-up
                print(f"\n  [警告] 检查命令执行失败 for error_id {error_id}: {result.stderr.strip()}")
                # A failed check cannot confirm absence; conservatively do not add as new
                continue

            # Parse the JSON response to decide whether the task exists
            try:
                # Attempt to parse the JSON output
                response_data = json.loads(result.stdout)

                # Inspect the count / tasks fields
                task_count = response_data.get('count', 0)
                tasks = response_data.get('tasks', [])

                if task_count > 0 or len(tasks) > 0:
                    existing_tasks_count += 1
                else:
                    # Task does not exist yet
                    new_tasks.append((job_id, error_id))

            except json.JSONDecodeError as e:
                # JSON parsing failed — the response may be truncated.
                # Fall back to substring checks on the raw output.
                if '"count": 0' in result.stdout:
                    # Explicitly no task found
                    new_tasks.append((job_id, error_id))
                elif '"count":' in result.stdout and '"tasks":' in result.stdout:
                    # count/tasks fields present: assume a task was found
                    existing_tasks_count += 1
                else:
                    # Undecidable; log a warning
                    print(f"\n  [警告] 无法解析 API 响应 for error_id {error_id}: {str(e)}")
                    # Conservatively do not add as a new task
                    continue

        except Exception as e:
            # Newline first so the progress line is not overwritten
            print(f"\n  [检查命令执行出错]: {e}")
            # Depending on policy, such a task could be treated as new or skipped

    print("\n检查完成。") # newline to finish the progress line

    output_filename = "new_bisect_tasks.txt"
    if new_tasks:
        try:
            with open(output_filename, 'w', encoding='utf-8') as f:
                for job_id, errid in new_tasks:
                    f.write(f"{job_id},{errid}\n")
        except IOError as e:
            logger.error(f"写入文件 {output_filename} 失败: {e}")
            return
    else:
        # No new tasks: still create the file with a marker message
        with open(output_filename, 'w', encoding='utf-8') as f:
            f.write("# No new bisect tasks found.\n")


    # --- Final statistics ---
    print(f"\n=== 查重结果统计 ===")
    print(f"检查总数: {len(tasks_to_check)}")
    print(f"已存在任务: {existing_tasks_count}")
    print(f"发现新任务: {len(new_tasks)}")
    print(f"新任务列表已保存到: {output_filename}")

def calculate_similarity_score(predicted_errid, actual_errid):
    """Return a 0.0-1.0 similarity ratio between two errids.

    Uses difflib.SequenceMatcher; either argument being empty yields 0.0.
    """
    if not (predicted_errid and actual_errid):
        return 0.0
    matcher = difflib.SequenceMatcher(None, predicted_errid, actual_errid)
    return matcher.ratio()

def find_most_similar_errid(predicted_errid, actual_errids_str, top_n=3):
    """Rank the recorded errids by similarity to *predicted_errid*.

    Returns up to *top_n* (score, raw_errid, cleaned_errid) tuples,
    highest score first; entries with zero similarity are dropped.
    """
    if not predicted_errid or not actual_errids_str:
        return []

    ranked = []
    for raw in actual_errids_str.split():
        # Strip db prefixes ('makepkg.eid.'/'eid.', '=>') and a trailing ':001'.
        cleaned = re.sub(r'^(makepkg\.eid\.|eid\.)?(=>)*', '', raw)
        if cleaned.endswith(':001'):
            cleaned = cleaned[:-4]

        score = difflib.SequenceMatcher(None, predicted_errid, cleaned).ratio()
        if score > 0:
            ranked.append((score, raw, cleaned))

    # Best matches first; stable sort preserves input order among ties.
    ranked.sort(key=lambda entry: entry[0], reverse=True)
    return ranked[:top_n]

def is_download_stage_failure(actual_errids):
    """Classify whether any errid indicates a download-stage failure.

    Parameters:
        actual_errids: iterable of errid strings (may be empty).

    Returns:
        (True, failure_type_label) for the first errid matching a known
        download-failure pattern, otherwise (False, None).
    """
    if not actual_errids:
        return False, None

    # Known download-failure signatures: network resets, git working-copy /
    # clone failures, and generic download/fetch/timeout errors.
    compiled = [re.compile(p, re.IGNORECASE) for p in (
        r'.*Connection_reset_by_peer.*',
        r'.*read_error.*Connection_reset_by_peer.*',
        r'.*ERROR.*Failure_while_creating_working_copy.*git_repo.*',
        r'.*ERROR.*Failed_to_clone.*git.*repo.*',
        r'.*download.*failed.*',
        r'.*fetch.*failed.*',
        r'.*clone.*failed.*',
        r'.*network.*error.*',
        r'.*timeout.*download.*',
    )]

    for errid in actual_errids:
        if any(pattern.search(errid) for pattern in compiled):
            # Refine the label based on the most specific substring present.
            if 'Connection_reset_by_peer' in errid:
                return True, "网络连接重置"
            if 'Failure_while_creating_working_copy' in errid:
                return True, "Git仓库克隆失败"
            return True, "下载阶段失败"

    return False, None

def print_failed_jobs_report(failed_jobs, output_file=None):
    """Print a user-friendly report of jobs whose analysis failed.

    Parameters:
        failed_jobs: list of dicts carrying at least 'job_id', 'core_error',
            'is_reproducible' and 'errid_correct'.
        output_file: optional path; when given, the report is written to
            that file instead of the console.

    Fixes over the previous version: the actual errids are fetched once per
    job (get_job_errid spawns a subprocess and used to be called twice), and
    the report is buffered and written in a single open instead of
    re-opening the output file in append mode for every line.
    """
    if not failed_jobs:
        message = "未发现复现失败或errid不匹配的作业。"
        if output_file:
            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(message + "\n")
            print(f"结果已保存到: {output_file}")
        else:
            print(message)
        return

    # Buffer every report line; emit once at the end.
    lines = []

    def write_output(text):
        """Append one line to the buffered report."""
        lines.append(text)

    write_output(f"--- 分析失败任务列表 ({len(failed_jobs)}个) ---")
    write_output("")

    # Jobs that failed during the download stage (no real test was run).
    download_failures = []

    for job_data in failed_jobs:
        job_id = job_data.get('job_id')
        core_error = job_data.get('core_error', '')
        result_root = get_job_result_root(job_id) or "无法获取"

        # Fetch the actual errids exactly once per job.
        actual_errid_str = get_job_errid(job_id)
        actual_errids = actual_errid_str.split() if actual_errid_str else []
        actual_errid_count = len(actual_errids)
        is_download_failure, failure_type = is_download_stage_failure(actual_errids)

        if is_download_failure:
            download_failures.append({
                'job_id': job_id,
                'failure_type': failure_type,
                'result_root': result_root
            })

        # --- Build the list of failure reasons ---
        reasons = []
        if job_data.get('is_reproducible') == '否':
            reasons.append("错误未复现")
        if job_data.get('errid_correct') == '否':
            reasons.append("errid不匹配")
        if is_download_failure:
            reasons.append(f"下载阶段失败({failure_type})")

        # --- Derive the predicted errid from the core error ---
        predicted_errid = ""
        if core_error:
            # "unmet direct dependencies" errors are matched on their prefix
            # only, so the predicted errid is built from the partial string.
            unmet_match = re.match(r"WARNING: unmet direct dependencies detected for (\w+)", core_error, re.IGNORECASE)
            if unmet_match:
                partial_core_error = f"WARNING: unmet direct dependencies detected for {unmet_match.group(1)}"
                predicted_errid = build_pkg_error_id(partial_core_error)
            else:
                predicted_errid = build_pkg_error_id(core_error)

        # --- Core per-job information ---
        write_output("------------------------------------------------------------")
        write_output(f"job_id: {job_id}")
        write_output(f"result_root: {result_root}")
        write_output(f"分析失败原因: {', '.join(reasons) or '未知'}")
        write_output(f"core_error: {core_error}")
        write_output(f"预测errid: {predicted_errid}")
        write_output(f"实际errid总数: {actual_errid_count}")

        # --- Show the closest errids (only when the errid did not match) ---
        if job_data.get('errid_correct') == '否' and predicted_errid and actual_errid_str:
            similar_errids = find_most_similar_errid(predicted_errid, actual_errid_str, top_n=3)
            if similar_errids:
                write_output("最相似的errid:")
                for i, (similarity, original_errid, comparable_errid) in enumerate(similar_errids, 1):
                    write_output(f"  {i}. {original_errid} (相似度: {similarity:.2%})")
            else:
                write_output("未找到相似的errid")
        elif actual_errid_str and actual_errid_count <= 5:
            # Few errids overall: just show them all.
            write_output(f"实际errid: {actual_errid_str}")

    write_output("------------------------------------------------------------")

    # --- Summary of download-stage failures ---
    if download_failures:
        write_output("")
        write_output(f"=== 下载阶段失败汇总 ({len(download_failures)}个) ===")
        write_output("这些任务在下载阶段就失败了，没有进行实际测试:")
        write_output("")

        # Group by failure type.
        failure_groups = {}
        for failure in download_failures:
            failure_groups.setdefault(failure['failure_type'], []).append(failure)

        for failure_type, jobs in failure_groups.items():
            write_output(f"【{failure_type}】({len(jobs)}个):")
            for job in jobs:
                write_output(f"  job_id: {job['job_id']}")
                write_output(f"  result_root: {job['result_root']}")
                write_output("")

    write_output("=" * 60)

    # Emit the buffered report in one shot.
    report = "\n".join(lines) + "\n"
    if output_file:
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(report)
        print(f"分析失败报告已保存到: {output_file}")
        if download_failures:
            print(f"发现 {len(download_failures)} 个下载阶段失败的任务")
    else:
        print(report, end='')
        if download_failures:
            print(f"\n*** 警告: 发现 {len(download_failures)} 个下载阶段失败的任务 ***")


def main():
    """CLI entry point: parse arguments and dispatch to one tool.

    Mutually exclusive modes: --resubmit, --stats, --create-bisect-tasks,
    --create-makepkg-jobs, --list-failures. With no mode flag, the input
    CSV is analyzed row by row (job health, error reproduction, errid
    correctness, download-stage failures) and an "<stem>_analyzed.csv"
    result file is written.

    Fixes over the previous version: "Verbose logging enabled." is logged
    once instead of once per matching console handler, and redundant
    emptiness checks on CSV fields were simplified.
    """
    parser = argparse.ArgumentParser(
        description='Dataset validator and job resubmission tool',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('csv_file', help='Input CSV file path')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--resubmit', action='store_true', help='Resubmit jobs from CSV file')
    group.add_argument('--stats', action='store_true', help='Analyze and show statistics of the CSV file')
    group.add_argument('--create-bisect-tasks', action='store_true', help='Create bisect tasks for reproducible jobs')
    group.add_argument('--create-makepkg-jobs', action='store_true', help='Create makepkg jobs from CSV data (head, config, mail_link)')
    group.add_argument('--list-failures', action='store_true', help='List jobs with reproducibility or errid issues in a detailed format.')

    parser.add_argument('--output', '-o', help='Output CSV file path (default: input_resubmitted.csv)')
    parser.add_argument('--failures-output', help='Output file for --list-failures (default: print to console)')
    parser.add_argument('--force-resubmit', action='store_true', help='Force submission of a new job.')
    parser.add_argument('--extra-args', action='append', default=[], help='Extra arguments to override job config, e.g., --extra-args program.makepkg.extra_args="W=1"')
    parser.add_argument('--testbox', default='dc-8g', help='Testbox for makepkg jobs (default: dc-8g)')
    parser.add_argument('--git-url', help='Git repository URL for makepkg jobs (default: git://172.168.131.113:9418/new-upstream/l/linux/openeuler-kernel.git)')
    parser.add_argument('--force-update', action='store_true', help='强制更新所有字段，即使它们已经有值')
    parser.add_argument('--verbose', '-v', action='store_true', help='Enable verbose debug logging on console.')

    # No arguments at all: show usage and fail.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    if args.verbose:
        import logging
        # Lower the level of console (stream) handlers only.
        console_found = False
        for handler in logger.logger.handlers:
            if isinstance(handler, logging.StreamHandler):
                handler.setLevel(logging.DEBUG)
                console_found = True
        if console_found:
            # Log once, not once per matching handler.
            logger.info("Verbose logging enabled.")

    if not Path(args.csv_file).exists():
        logger.error(f"CSV文件不存在: {args.csv_file}\n")
        sys.exit(1)

    if args.stats:
        analyze_csv_stats(args.csv_file)
        sys.exit(0)

    if args.create_bisect_tasks:
        create_bisect_tasks_from_csv(args.csv_file)
        sys.exit(0)

    if args.create_makepkg_jobs:
        output_file = args.output or f"{Path(args.csv_file).stem}_with_jobs.csv"
        logger.info(f"开始从CSV创建makepkg任务: {args.csv_file} -> {output_file}")
        success = create_and_submit_makepkg_jobs_from_csv(
            args.csv_file,
            output_file,
            force_submit=args.force_resubmit,
            testbox=args.testbox,
            default_git_url=args.git_url
        )
        sys.exit(0 if success else 1)

    if args.resubmit:
        output_file = args.output or f"{Path(args.csv_file).stem}_resubmitted.csv"
        logger.info(f"开始重新提交CSV中的作业: {args.csv_file} -> {output_file}")
        success = resubmit_jobs_from_csv(args.csv_file, output_file, args.force_resubmit, args.extra_args)
        sys.exit(0 if success else 1)

    # --- Default mode: per-row analysis of the CSV ---
    output_file = args.output or f"{Path(args.csv_file).stem}_analyzed.csv"

    with open(args.csv_file, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        rows = list(reader)

    def get_idx(name):
        """Return the index of a required column, exiting with an error if missing."""
        try:
            return header.index(name)
        except ValueError:
            logger.error(f"CSV文件中缺少必需的列: '{name}'\n")
            sys.exit(1)

    job_id_idx = get_idx('job_id')
    core_error_idx = get_idx('core_error')

    # Columns the analysis fills in; appended only when absent.
    # Expected input format: mail_time,mail_link,config,head,core_error,
    # error_id,job_id,result_root,job_health,W=1,errid_correct,
    # Reported Commit,First bad Commit,Time
    analysis_cols = ["is_reproducible", "search_keywords", "download_failure"]
    for col in analysis_cols:
        if col not in header:
            header.append(col)

    results = []
    failures_for_listing = []
    for i, row in enumerate(rows):
        # Pad short rows so every header column has a value.
        while len(row) < len(header):
            row.append("")

        job_id = row[job_id_idx].strip() if len(row) > job_id_idx else None
        core_error = row[core_error_idx].strip() if len(row) > core_error_idx else None

        if not job_id:
            logger.warning(f"跳过第 {i+2} 行: job_id 为空\n")
            continue

        logger.info(f"处理第 {i+2} 行: job_id={job_id}")

        # Row is padded to len(header) above, so zip covers every column.
        new_data = dict(zip(header, row))

        # Refresh job_health only when missing/unknown (or forced).
        if args.force_update or not new_data.get("job_health") or new_data["job_health"] == "UNKNOWN":
            new_data["job_health"] = get_job_health(job_id)

        # Reproduction check is the core analysis: always refresh.
        is_reproducible, search_keywords = check_error_reproduction(job_id, core_error)
        new_data["is_reproducible"] = is_reproducible
        new_data["search_keywords"] = search_keywords

        # Detect download-stage failures from the actual errids.
        actual_errid_str = get_job_errid(job_id)
        actual_errids = actual_errid_str.split() if actual_errid_str else []
        is_download_failure, failure_type = is_download_stage_failure(actual_errids)
        new_data["download_failure"] = failure_type if is_download_failure else ""

        # Refresh errid_correct / error_id only when empty (or forced).
        if args.force_update or not new_data.get("errid_correct"):
            errid_correct_status, matched_errid = check_errid_correctness(job_id, core_error)
            new_data["errid_correct"] = errid_correct_status
            if args.force_update or not new_data.get("error_id"):
                new_data["error_id"] = matched_errid
        else:
            # Not refreshed: keep the value already present in the CSV.
            errid_correct_status = new_data.get("errid_correct", "")

        results.append([new_data[h] for h in header])

        if args.list_failures:
            if is_reproducible == '否' or errid_correct_status == '否':
                failures_for_listing.append(new_data)

    if args.list_failures:
        print_failed_jobs_report(failures_for_listing, args.failures_output)
        sys.exit(0)

    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(results)

    logger.info(f"分析完成! 结果已保存到 {output_file}\n")
    return 0


# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
