import atexit
import datetime
import json
import os
import re
import shutil
import signal
import sys
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List, Optional
from urllib.parse import quote

import requests
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
from loguru import logger
from pygerrit2 import GerritRestAPI, HTTPBasicAuth
from unidiff import PatchSet

# Route log output to stdout at INFO level (drop loguru's default handler first).
logger.remove()
logger.add(sys.stdout, level='INFO')
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from any origin

# Expected HTTP status for successful Gerrit REST calls.
HTTP_SUCCESS = 200
# Line threshold for the "file header" special handling in chunking.
HEADER_FILE_DETECT_LINES = 200
# Maximum number of lines per analysis block sent to the workflow.
BLOCK_LINE_SIZE = 500
# Scratch directory where workflow result files are written.
TEMP_FILE_DIR = Path('/Catalog/Errorsniffer/temp')
# Files with more added lines than this are skipped entirely.
MAX_DETECT_LINES_PER_FILE = 5000

CODE_HEADER_FILE_EXTENSIONS = ('.h', '.hpp', '.hxx', '.hh')
CODE_FILE_EXTENSIONS = ('.h', '.hpp', '.hxx', '.hh','.c', '.cpp', '.cxx', '.cc')
# Global Gerrit REST client; None until configured — presumably initialised
# at startup elsewhere in the file (not visible here) — TODO confirm.
gerrit_client = None
# Change numbers currently being processed (dedup guard for task submission).
new_changes = set()

class AppConfig:
    """Application configuration holder.

    Wraps the values read from ``config.json`` (via the module-level
    ``load_config`` function) and supports lazy loading on first use.
    """

    def __init__(self):
        # Lazy-load guard: flips to True once load_config() has run.
        self._config_loaded = False
        self.gerrit_url = None
        self.username = None
        self.password = None
        self.reportspath = None
        self.dify_api_server = None
        self.dify_api_key = None
        self.dify_api_test_key = None
        self.v5_repo = None
        self.v6_repo = None
        self.v35_repo = None
        self.v4_repo = None
        self.algo_people = None

    def load_config(self):
        """Populate every field from the on-disk configuration file."""
        cfg = load_config()
        gerrit = cfg['gerrit']
        dify = cfg['dify']
        self.gerrit_url = gerrit['url']
        self.username = gerrit['username']
        self.password = gerrit['password']
        self.reportspath = gerrit['reportspath']
        self.dify_api_server = dify['DIFY_API_SERVER']
        self.dify_api_key = dify['DIFY_API_KEY']
        self.dify_api_test_key = dify['DIFY_API_TEST_KEY']
        self.v5_repo = cfg['v5repo']
        self.v6_repo = cfg['v6repo']
        self.v35_repo = cfg['v35repo']
        self.v4_repo = cfg['v4repo']
        self.algo_people = cfg['algo_people']
        self._config_loaded = True

    def ensure_loaded(self):
        """Load the configuration if it has not been loaded yet."""
        if not self._config_loaded:
            self.load_config()


# Global configuration instance shared by request handlers and scheduled jobs.
app_config = AppConfig()

def get_recommended_workers():
    """
    Pick a worker-thread count based on the number of CPU cores.

    Returns:
        int: recommended number of threads for the workflow pool.
    """
    cores = os.cpu_count() or 1
    # (core ceiling, workers): small boxes get modest concurrency to avoid
    # resource exhaustion; bigger boxes exploit more I/O-wait overlap.
    for ceiling, workers in ((2, 8), (4, 16), (8, 32)):
        if cores <= ceiling:
            return workers
    # Very large machines: scale with cores but cap the pool at 64 threads.
    return min(64, cores * 4)
    
# Thread pool that runs the Dify workflow analyses concurrently.
workflow_executor = ThreadPoolExecutor(
    max_workers=get_recommended_workers(),  # sized from available system resources
    thread_name_prefix="workflow_"
)

# Background scheduler for the periodic Gerrit polling job.
scheduler = BackgroundScheduler()


def shutdown_handler(signum=None, frame=None):
    """
    Gracefully stop the scheduler and the workflow thread pool.

    Registered both as a signal handler (SIGINT/SIGTERM) and as an atexit
    hook, so it may run more than once; each step is safe to repeat.

    Args:
        signum: signal number when invoked as a signal handler, else None
        frame: current stack frame (unused)
    """
    logger.info("正在关闭应用...")

    # Stop the scheduler (no-op if it never started or already stopped).
    if scheduler.running:
        scheduler.shutdown(wait=True)
        logger.info("调度器已关闭")

    # Stop accepting new tasks; do not block on in-flight workflows.
    workflow_executor.shutdown(wait=False)
    logger.info("线程池已关闭")

    # Only force-exit when invoked as a signal handler.  Calling sys.exit()
    # from the atexit hook would raise SystemExit during normal interpreter
    # shutdown — that was the bug this guard fixes.
    if signum is not None:
        sys.exit(0)


# Register shutdown handling for Ctrl+C, SIGTERM, and normal interpreter exit.
signal.signal(signal.SIGINT, shutdown_handler)  # Ctrl+C
signal.signal(signal.SIGTERM, shutdown_handler)  # termination signal
atexit.register(shutdown_handler) # cleanup hook for normal program exit


def init_scheduler():
    """
    Start the background scheduler with the periodic Gerrit polling job.

    Idempotent: does nothing when the scheduler is already running.
    The job re-scans every configured repository every 120 seconds.
    """
    if scheduler.running:
        return
    scheduler.add_job(fetch_all_repositories, 'interval', seconds=120)
    scheduler.start()
    logger.info("调度器启动成功")


def load_config():
    """
    Load ``config.json`` that sits next to this module.

    Returns:
        dict: parsed configuration, or {} when the file is missing or invalid.
    """
    config_path = Path(__file__).parent / 'config.json'
    try:
        return json.loads(config_path.read_text(encoding='utf-8'))
    except FileNotFoundError:
        logger.error("错误: 配置文件未找到！")
    except json.JSONDecodeError:
        logger.error("错误: 配置文件格式无效！")
    return {}


def fetch_all_repositories():
    """Run the polling job once for every configured repository group."""
    repo_groups = (
        app_config.v5_repo,
        app_config.v6_repo,
        app_config.v35_repo,
        app_config.v4_repo,
    )
    # Preserve the original group order: v5, v6, v35, v4.
    for group in repo_groups:
        for repository in group:
            fetch_data_job(repo=repository)


def fetch_data_job(repo=""):
    """
    Scheduled job: scan one repository for open changes needing analysis.

    For every open change owned by a tracked engineer, checks whether the
    analysis account (username GW00356676) has already cast a +1
    Code-Review vote; if not, the change is submitted for analysis.

    Args:
        repo (str): repository (project) name
    """
    try:
        # List the repository's currently open changes.
        status = "open"
        endpoint = f"/changes/?q=project:{repo}+status:{status}"
        changes = gerrit_client.get(endpoint)
        for change in changes:
            number = change['_number']
            owner = change['owner']['_account_id']
            # Only handle changes owned by tracked (algo team) accounts.
            if not app_config.algo_people.get(str(owner)):
                continue
            # Fetch the review detail for this change.  Fixed: use a new
            # name instead of rebinding `changes`, which shadowed the list
            # being iterated.
            encoded_repo = quote(repo, safe='')  # URL-encode the repo name
            endpoint = f"/changes/{encoded_repo}~{number}/detail"
            detail = gerrit_client.get(endpoint)
            code_review = detail['labels']['Code-Review']
            # Fixed: default to "not yet reviewed" so a failure in the
            # parsing below cannot leave `result` unbound (NameError on the
            # first change) or reuse the previous iteration's value.
            result = False
            # Only inspect votes when the 'all' key is present.
            if dict_has_key(code_review, "all"):
                review_all = code_review['all']
                try:
                    result = contains_dict_with_string(review_all, "username", "GW00356676")
                except Exception as e:
                    logger.error(f"{number}发生错误: {e}")
                    logger.error(review_all)

            if result is False:
                logger.info(change)
                submit_workflow_task(number, encoded_repo)
    except Exception as e:
        logger.error(f"定时任务错误: {e}")


def dict_has_key(data_list, keyword):
    """
    Report whether any element of *data_list* contains *keyword*.

    Uses the ``in`` operator per element, so this is a substring test for
    string elements and a key-membership test when passed a dict.

    Args:
        data_list (list): iterable of elements to inspect
        keyword (str): keyword to look for

    Returns:
        bool: True when at least one element contains the keyword
    """
    for element in data_list:
        if keyword in element:
            return True
    return False


def contains_dict_with_string(data_list, key, value):
    """
    Check whether the list holds a dict whose *key* equals *value* and
    whose 'value' entry equals 1 (i.e. a +1 vote by that reviewer).

    Args:
        data_list (list): list of (possibly) dicts
        key (str): key to check
        value (str): value to match

    Returns:
        bool: True when a matching dict is found, else False
    """
    return any(
        isinstance(entry, dict)
        and key in entry
        and 'value' in entry
        and entry[key] == value
        and entry['value'] == 1
        for entry in data_list
    )


def submit_workflow_task(change_number, repo):
    """
    Queue a workflow-analysis task for one change on the thread pool.

    Deduplicates via ``new_changes``: a change already registered there is
    skipped.  If submission itself fails, the change number is unregistered
    again and the exception is re-raised.

    Args:
        change_number (int): change number
        repo (str): repository name (URL-encoded)
    """
    if change_number in new_changes:
        logger.info(f"变更 {change_number} 已在处理中，跳过")
        return

    new_changes.add(change_number)
    try:
        def _run():
            # Execute inside a Flask application context so template
            # rendering keeps working on the worker thread.
            with app.app_context():
                return run_workflow_safely(change_number)

        def _done(f):
            with app.app_context():
                task_done_callback(f, change_number, repo)

        future = workflow_executor.submit(_run)
        future.add_done_callback(_done)
        logger.info(f"已提交变更 {change_number} 处理任务")
    except Exception as e:
        # Submission failed: forget the change so it can be retried later.
        new_changes.discard(change_number)
        logger.error(f"提交变更 {change_number} 处理任务失败: {e}")
        raise


def run_workflow_safely(change_number):
    """
    Run the analysis workflow for one change with full exception handling.

    Args:
        change_number (int): change number

    Returns:
        dict: execution result.  ``success`` is False only for workflow
        timeouts and raised exceptions — every other outcome (including an
        empty analysis result) is reported as success.
    """
    # Default detection options for the scheduled (automatic) pipeline.
    options = {'enableStaticEngine': 1, 'ignoreLevelB': 0, 'ignoreLevelC': 0}
    try:
        logger.info(f"开始处理变更 {change_number}")
        start_time = time.time()
        result = submit_workflow_run(change_number, options)
        end_time = time.time()
        logger.info(f"变更 {change_number} 处理完成，耗时: {end_time - start_time:.2f}s")
        if result['success'] is False:
            if result['message'] == 'workflow time out':
                return {'success': False, 'message': "workflow time out!"}
        elif result['result'] is None:  # fixed: identity comparison with None
            logger.warning(f"变更{change_number}处理结果为{result['message']}")
        return {"success": True, "change_number": change_number, "result": result['result']}
    except Exception as e:
        logger.error(f"处理变更 {change_number} 时出错: {e}")
        return {"success": False, "change_number": change_number, "message": str(e)}


def task_done_callback(future, change_number, repo):
    """
    Completion callback: publish the analysis report back to Gerrit.

    On success, builds an HTML report from the workflow output plus Gerrit
    change/account metadata, links it in a review comment and casts a +1
    vote.  On failure the change is unregistered so it can be retried.

    Args:
        future: Future holding the run_workflow_safely() result dict
        change_number (int): change number
        repo (str): repository name (URL-encoded)
    """
    try:
        result = future.result()
        if result['success']:
            new_changes.remove(change_number)
            logger.info(f"变更 {change_number} 成功处理完成")
            # Gather basic change/owner metadata from Gerrit.
            data = get_change_info_from_gerrit(change_number)
            basic_info = data['basic_info']
            changed_files = data['changed_files']
            account_info = get_account_info_from_gerrit(basic_info['owner']['_account_id'])
            # Fixed: reuse the already-fetched result instead of calling
            # future.result() a second time.
            analysisResult = result['result']
            if analysisResult is None:
                # No findings: render an empty report skeleton.
                analysisResult = {
                    "critical_issues": [],
                    "high_risk_issues": [],
                    "code_quality_issues": []
                }

            detail_data = {
                'basicInfo': basic_info,
                'accountInfo': account_info,
                'changedFiles': changed_files,
                'analysisResult': analysisResult,
            }
            # Assemble and write the HTML report.
            reports_dir = Path('./reports/')
            reports_dir.mkdir(exist_ok=True)  # no error if the directory already exists
            file_name = str(change_number) + "_report_" + datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            generate_html_report(changes_number=str(change_number), title=file_name, output_path='./reports/',
                                 data=detail_data)
            gerrit_add_review_comment(repo, change_number, file_name)
            gerrit_code_review_auto(repo, change_number)
        else:
            logger.error(f"变更 {change_number} 处理失败: {result['message']}")
            new_changes.discard(change_number)  # unregister so it can be retried
    except Exception as e:
        logger.error(f"任务回调错误: {e}")


def generate_html_report(changes_number="", title="Gerrit 变更报告", output_path=None, data=None):
    """
    Render the HTML report and optionally write it to disk.

    Args:
        changes_number (str): change number (used as report title/id)
        title (str): file-name stem used when output_path is a directory
        output_path (str): file or directory to write to; falsy skips writing
        data (dict): report payload passed to the template

    Returns:
        str: rendered HTML content
    """
    template_data = {
        'title': changes_number,
        'changeID': changes_number,
        'generated_time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    }
    with app.app_context():
        html_content = render_template('report_template.html', detail_data=data, **template_data)

    if not output_path:
        return html_content

    target = Path(output_path)
    # A suffix means a file path: create its parent; otherwise create the dir.
    if target.suffix:
        target.parent.mkdir(parents=True, exist_ok=True)
    else:
        target.mkdir(parents=True, exist_ok=True)

    # When pointed at a directory, derive the file name from the title.
    if target.is_dir() or str(output_path).endswith(os.sep):
        target = target / f"{title}.html"

    target.write_text(html_content, encoding='utf-8')
    logger.info(f"HTML报告已生成: {target}")
    return html_content


def gerrit_add_review_comment(repo, change_number, file_name):
    """
    Post a review comment on the change linking to the generated report.

    Args:
        repo (str): repository name (URL-encoded)
        change_number (int): change number
        file_name (str): report file name without extension
    """
    file_name = file_name + ".html"
    message = f"ErrorSniffer Analysis Report: {app_config.reportspath}/{file_name}"
    # Review payload: a plain change-log message.
    data = {
        "message": message
    }
    logger.info(f"reply in the gerrit:{message}!!!!")
    endpoint = f"/changes/{repo}~{change_number}/revisions/current/review"
    response = gerrit_client.post(
        endpoint,
        return_response=True,
        json=data)

    # With return_response=True pygerrit2 returns (decoded_body, Response);
    # index 1 is the underlying HTTP response.
    status_code = response[1].status_code
    if status_code == HTTP_SUCCESS:
        # Fixed: drop the stray `response` positional argument that was
        # passed to logger.info alongside a message with no placeholder.
        logger.info("Change log message added successfully.")
    else:
        # Fixed: log the actual status code, not the whole response tuple.
        logger.info(f"Failed to add change log message. Status code: {status_code}")


def gerrit_code_review_auto(repo, change_number):
    """
    Automatically cast a +1 Code-Review vote on the change.

    Args:
        repo (str): repository name (URL-encoded)
        change_number (int): change number
    """
    data = {
        "labels": {
            "Code-Review": 1
        },
        "message": "Automatically added +1 Code-Review vote."
    }

    endpoint = f"/changes/{repo}~{change_number}/revisions/current/review"
    response = gerrit_client.post(
        endpoint,
        return_response=True,
        json=data)

    # response is (decoded_body, Response) because of return_response=True.
    status_code = response[1].status_code
    if status_code == HTTP_SUCCESS:
        # Fixed: plain string instead of an f-string with no placeholders.
        logger.info("Automatically added Code-Review successfully.")
    else:
        # Fixed: log the numeric status code instead of the raw tuple
        # plus the response object's type.
        logger.info(f"Failed to add Code-Review Automatically. Status code: {status_code}")


def get_change_info_from_gerrit(change_id):
    """
    Fetch a change's basic info and its changed-file list from Gerrit.

    Args:
        change_id (str): change id

    Returns:
        dict: {'change_id', 'basic_info', 'changed_files'}
    """
    # Basic commit/change information.
    basic_info = gerrit_client.get(f"/changes/{change_id}")

    # Files touched by the current revision.
    changed_files = gerrit_client.get(f"/changes/{change_id}/revisions/current/files")

    data = {
        "change_id": change_id,
        "basic_info": basic_info,
        "changed_files": changed_files
    }
    logger.info(data)
    return data


def get_account_info_from_gerrit(account_id):
    """
    Fetch account details from Gerrit.

    Args:
        account_id (str): account id

    Returns:
        dict: {'account_id', 'account_info'}
    """
    account_info = gerrit_client.get(f"/accounts/{account_id}")

    data = {
        "account_id": account_id,
        "account_info": account_info
    }
    logger.info(data)
    return data


def workflow_run(code_changes: str, options: dict):
    """
    Invoke the Dify workflow to analyse one block of code changes.

    Args:
        code_changes (str): code change content (JSON string {path: block})
        options (dict): must contain 'new_h_flag' and 'result_dir'; an
            optional 'prompt' switches to the test API key and passes the
            prompt through as custom check rules.

    Returns:
        The workflow's 'result' output on success, the string
        'workflow time out' on a timeout, or None on any other failure.
    """
    # Shared workflow inputs; the 'prompt' variant only adds check_rules
    # and uses the test API key.  (Deduplicated the two near-identical
    # header/parameter blocks of the original.)
    inputs = {
        "code_changes": code_changes,
        "new_h_flag": options['new_h_flag'],
        "result_dir": options['result_dir'],
    }
    if 'prompt' in options:
        api_key = app_config.dify_api_test_key
        inputs["check_rules"] = options['prompt']
    else:
        api_key = app_config.dify_api_key
    headers = {
        'Authorization': 'Bearer ' + api_key,
        'Content-Type': 'application/json'
    }
    parm = {
        "inputs": inputs,
        "user": "vluo",
        "response_mode": "blocking",
    }
    logger.info(f"headers:{headers} parm: {parm}")
    try:
        ret = requests.post(f'{app_config.dify_api_server}/workflows/run', data=json.dumps(parm), headers=headers)
        response_data = ret.json()

        data = response_data.get('data', {})
        if data.get('status') == 'failed':
            logger.info(f"工作流返回：{response_data}")
            # Fixed: 'error' may be missing or None, which previously made
            # the `in` test raise TypeError; coerce to '' first.
            if 'timeout' in (data.get('error') or ''):
                logger.error('工作流超时导致失败')
                return 'workflow time out'
            logger.error("工作流失败")
            return None

        return data.get('outputs', {}).get('result')
    except Exception as e:
        # Fixed: do not call ret.json() here — `ret` is unbound when the
        # POST itself raised, and the body may not be valid JSON.
        logger.error(f"workflow_run error: {e}")
        return None


def get_change_patch(change_id):
    """
    Download the current revision's patch for a change.

    Args:
        change_id (str): change id

    Returns:
        str: patch content
    """
    return gerrit_client.get(f"/changes/{change_id}/revisions/current/patch")

def get_front_lines(change_id, file_path : str, start_line_no):
    """
    Fetch the lines of a file preceding *start_line_no*, each prefixed
    with its 1-based line number.

    Args:
        change_id (str): change id
        file_path (str): file path within the repository
        start_line_no (int): first line NOT included in the result

    Returns:
        str: numbered lines joined into a single string
    """
    encoded_path = file_path.replace('/', '%2F')
    endpoint = f"/changes/{change_id}/revisions/current/files/{encoded_path}/content"
    file_content = gerrit_client.get(endpoint)

    # The API returns a str; splitlines() normalises \n and \r\n endings.
    numbered = [
        f"{idx}{text}\n"
        for idx, text in enumerate(file_content.splitlines()[:start_line_no - 1], start=1)
    ]
    return ''.join(numbered)

@dataclass
class FunctionInfo:
    """Metadata for one parsed C/C++ function definition."""
    name: str          # function (or Class::method) name
    start_line: int    # 1-based line of the definition
    end_line: int      # 1-based closing-brace line; 0 while still open
    return_type: str   # raw return-type text, stripped
    parameters: str    # raw parameter-list text, stripped

    def __getitem__(self, index: int) -> Any:
        """Positional access to the fields, in declaration order.

        Fixed: the return annotation previously used the builtin ``any``
        function where ``typing.Any`` was intended.
        """
        fields = [self.name, self.start_line, self.end_line, self.return_type, self.parameters]
        return fields[index]
def find_function_by_line(functions: list, target_line: int) -> Optional[FunctionInfo]:
    """
    Find the function whose line span contains *target_line*.

    Args:
        functions: list of FunctionInfo, sorted by start_line ascending
        target_line: 1-based line number to locate

    Returns:
        The containing FunctionInfo, or None when the line is outside
        every known function (or the list is empty/None).
    """
    if not functions:  # empty list or None
        return None
    try:
        for func in functions:
            if func.start_line <= target_line:
                # end_line == 0 marks a function that never closed (it runs
                # to EOF), so any later line counts as inside it.
                if func.end_line == 0 or target_line <= func.end_line:
                    return func
        return None
    except Exception as e:
        # Fixed: use the module logger instead of a bare print().
        logger.error(f"查找函数时出错: {e}")
        return None

# Regexes recognising C/C++ function-definition lines.
function_patterns = [
    # Plain definition: return-type name(params) {
    re.compile(r'^\s*((?:[\w\*]+\s+)+)(\w+)\s*\(([^)]*)\)\s*\{?\s*$'),
    # With a leading static/inline/extern/virtual specifier.
    re.compile(r'^\s*(?:static|inline|extern|virtual)\s+((?:[\w\*]+\s+)+)(\w+)\s*\(([^)]*)\)\s*\{?\s*$'),
    # Class member: return-type Class::method(params) {
    re.compile(r'^\s*((?:[\w\*]+\s+)+)(\w+::\w+)\s*\(([^)]*)\)\s*\{?\s*$'),
]
def match_function_definition(line: str) -> Optional[tuple]:
    """Match *line* against the known function-definition patterns.

    Returns:
        (return_type, function_name, parameters) on the first match,
        otherwise None.
    """
    for pattern in function_patterns:
        m = pattern.match(line)
        if m is None:
            continue
        params = m.group(3) if pattern.groups >= 3 else ""
        return m.group(1), m.group(2), params
    return None

def parse_functions(file_content: list) -> List[FunctionInfo]:
    """
    Parse all function definitions out of a C/C++ source file.

    Regex matching finds definition lines; brace counting locates each
    function's end.  Comment handling is only approximate — a lone '/'
    is skipped but comment state is never actually entered below, so
    braces inside comments may still be counted.

    Args:
        file_content: file lines (each typically ending with '\n')

    Returns:
        List of FunctionInfo.  A function that never closes gets the last
        line of the file as its end_line.
    """
    functions = []
    brace_count = 0          # open-brace depth inside the current function
    current_function = None  # FunctionInfo under construction, or None

    for line_num, line in enumerate(file_content, 1):
        # Keep the raw line for brace counting; match on the stripped one.
        original_line = line
        line = line.strip()

        # Skip blank lines.
        if not line:
            continue

        # Skip preprocessor directives (unless one happens to also look
        # like a function definition).
        if line.startswith('#') and not any(pattern.search(line) for pattern in function_patterns):
            continue

        # Outside any function: check whether this line starts one.
        if brace_count == 0 and current_function is None:
            function_match = match_function_definition(line)
            if function_match:
                return_type, function_name, parameters = function_match

                current_function = FunctionInfo(
                    name=function_name,
                    start_line=line_num,
                    end_line=0,  # 0 = still open
                    return_type=return_type.strip(),
                    parameters=parameters.strip()
                )

                # If the definition line already opens the body, start counting.
                if '{' in original_line:
                    brace_count = 1
                continue

        # Inside a function: count braces character by character.
        if current_function is not None:
            # Track string/comment state so quoted braces are ignored.
            # NOTE(review): in_comment is never set to True anywhere below,
            # so only braces inside string literals are really skipped.
            in_string = False
            in_comment = False
            escape_next = False
            string_char = None

            for char in original_line:
                if escape_next:
                    escape_next = False
                    continue

                if char == '\\':
                    escape_next = True
                    continue

                if not in_comment and not in_string and char == '/':
                    # Possible start of a comment; conservatively skipped.
                    continue

                if not in_comment and char in ['"', "'"]:
                    if not in_string:
                        in_string = True
                        string_char = char
                    elif string_char == char:
                        in_string = False
                        string_char = None
                    continue

                if not in_string and not in_comment:
                    if char == '{':
                        brace_count += 1
                    elif char == '}':
                        brace_count -= 1

                        # Depth back to zero: the function body ends here.
                        if brace_count == 0:
                            current_function.end_line = line_num
                            functions.append(current_function)
                            current_function = None
                            break

    # A function that never closed runs to the end of the file.
    if current_function is not None:
        current_function.end_line = len(file_content)
        functions.append(current_function)

    return functions
def extract_and_chunk_functions(changes: list[tuple[int, int]], functions: list[FunctionInfo], source_lines: list, max_lines: int = BLOCK_LINE_SIZE) -> list[str]:
        """
        Extract the functions overlapping the changed line ranges and pack
        them into blocks of at most *max_lines* lines.

        Preconditions:
        - changes sorted by start line ascending
        - functions sorted by start line ascending

        Args:
            changes: (start, end) 1-based inclusive changed line ranges
            functions: parsed functions of the file
            source_lines: full file content as a list of lines
            max_lines: block size limit (default BLOCK_LINE_SIZE)

        Returns:
            list[str]: each element is one block's full content, every line
            prefixed with its 1-based line number.  (Annotation fixed: the
            signature previously said list[dict].)
        """
        if not changes or not source_lines:
            return []

        # 1. Ordered, de-duplicated extraction (two pointers, O(M+N)).
        affected_funcs  = []
        seen_functions = set()

        func_idx = 0
        for cs, ce in changes:
            # Skip functions that end before this change starts.
            while func_idx < len(functions) and functions[func_idx][2] < cs:
                func_idx += 1

            # Collect every function overlapping [cs, ce].
            check_idx = func_idx
            while check_idx < len(functions) and functions[check_idx][1] <= ce:
                function = functions[check_idx]
                func_key = (function[0], function[1], function[2])  # (name, start, end)
                if func_key not in seen_functions:
                    seen_functions.add(func_key)
                    affected_funcs.append(func_key)
                check_idx += 1


        # 2. Greedy chunking (only content strings are accumulated).
        blocks: list[str] = []
        current_content: list[str] = []  # lines of the block being built
        current_lines = 0

        # Special case: a change within the first HEADER_FILE_DETECT_LINES
        # lines that falls outside every function pulls in the file header
        # (up to the first in-range function start) as a numbered prefix.
        flag = False
        end_line = 0
        for f_start, f_end in changes:
            if f_start < HEADER_FILE_DETECT_LINES and f_end <= HEADER_FILE_DETECT_LINES:
                for line in range(f_start, f_end+1):
                    function = find_function_by_line(functions, line)
                    if function is None:
                        flag = True
                    else:
                        if function[1] <= HEADER_FILE_DETECT_LINES:
                            end_line = function[1] - 1
        if flag:
            if end_line == 0:
                end_line = HEADER_FILE_DETECT_LINES
            for i, line in enumerate(source_lines[0:end_line]):
                current_content.append(str(i+1) + line)
        current_lines = len(current_content)

        for _, f_start, f_end in affected_funcs:
            # Functions already covered by the header prefix are skipped.
            if flag and f_start < end_line:
                continue
            # Number the function's lines with their absolute line numbers.
            func_lines = []
            for i, line in enumerate(source_lines[f_start-1:f_end]):
                func_lines.append(str(i+f_start) + line)
            func_line_count = len(func_lines)

            # Case A: the function alone exceeds max_lines -> force-split it.
            if func_line_count > max_lines:
                # Flush the block under construction first.
                if current_content:
                    blocks.append(''.join(current_content))
                    current_content, current_lines = [], 0

                # Emit the function in max_lines-sized slices.
                for i in range(0, func_line_count, max_lines):
                    blocks.append(''.join(func_lines[i:i+max_lines]))

            # Case B: appending would overflow -> start a new block.
            elif current_lines + func_line_count > max_lines:
                blocks.append(''.join(current_content))
                current_content, current_lines = func_lines, func_line_count

            # Case C: it fits -> append to the current block.
            else:
                current_content.extend(func_lines)
                current_lines += func_line_count

        # Flush the final block.
        if current_content:
            blocks.append(''.join(current_content))

        return blocks

def get_file_content(change_id, file_path : str):
    """
    Fetch a file's full content at the change's current revision.

    Args:
        change_id (str): change id
        file_path (str): file path within the repository

    Returns:
        list[str]: file lines, each terminated with '\n'
    """
    encoded_path = file_path.replace('/', '%2F')
    endpoint = f"/changes/{change_id}/revisions/current/files/{encoded_path}/content"
    file_content = gerrit_client.get(endpoint)

    # The API returns a str; splitlines() normalises \n and \r\n endings.
    # (Replaced a needless enumerate() over a full-list copy with a
    # comprehension — same output.)
    return [line + '\n' for line in file_content.splitlines()]
def parse_code_change(pch, change_id):
    """
    Split a patch into per-file analysis requests.

    Newly added header files are numbered whole (first BLOCK_LINE_SIZE
    lines) and flagged; other code files are chunked by the functions
    overlapping the changed line ranges.

    Args:
        pch (str): unified-diff patch content
        change_id (str): change id

    Returns:
        list[tuple[str, bool]]: (JSON payload {path: block}, new-header flag)
    """
    patchset = PatchSet(pch)
    req_list = list()
    single_req = dict()
    for file in patchset:
        # Skip oversized files and anything that is not C/C++ source.
        if file.added > MAX_DETECT_LINES_PER_FILE :
            continue
        if not file.path.endswith(CODE_FILE_EXTENSIONS):
            continue
        file_content = get_file_content(change_id, file.path)
        if file.path.endswith(CODE_HEADER_FILE_EXTENSIONS) and file.is_added_file:
            # Flush any pending request before emitting the header request.
            if single_req:
                req_list.append((json.dumps(single_req), False))
                single_req = {}
            # Number the leading lines 1..N.  Fixed: the original rotated
            # the list with pop(0)/append in a loop — O(n^2); this single
            # pass produces the identical string.
            chunk = file_content[0:BLOCK_LINE_SIZE]
            block = ''.join(str(i + 1) + line for i, line in enumerate(chunk))
            single_req[file.path] = block
            req_list.append((json.dumps(single_req), True))
            single_req = {}
            continue
        # Changed line ranges in the target file, one per hunk.
        changes = [(hunk.target_start, hunk.target_start + hunk.target_length - 1)
                   for hunk in file]
        functions = parse_functions(file_content)
        blocks = extract_and_chunk_functions(changes, functions, file_content)

        # One request per chunk, keyed by file path.
        for block in blocks:
            single_req[file.path] = block
            req_list.append((json.dumps(single_req), False))
            single_req = {}
    return req_list

def merge_results(results, sub_results):
    """
    Merge the list-valued entries of *sub_results* into *results* in place.

    On any error the original contents of *results* are restored.

    Args:
        results (dict): accumulator, mutated in place
        sub_results (dict): partial results to fold in
    """
    # Snapshot for rollback.  Fixed: the original used a shallow
    # results.copy(), which shares the very list objects that extend()
    # mutates below — clear()/update() then "restored" the already-mutated
    # lists, making the rollback a no-op.  Copy the list values too.
    backup = {k: (v.copy() if isinstance(v, list) else v) for k, v in results.items()}
    try:
        logger.info(f'sub_results : {sub_results}')
        # Fold in every list-valued entry; other value types are ignored.
        for key, value in sub_results.items():
            if not isinstance(value, list):
                continue
            # First occurrence of a key starts from an empty list.
            results.setdefault(key, []).extend(value)
    # Roll back to the pre-merge snapshot on any failure.
    except Exception as e:
        logger.error(f'merge sub_results error {e}')
        results.clear()
        results.update(backup)

def safe_rmtree(path):
    """
    Best-effort recursive directory removal.

    Any failure (missing path, permissions, ...) is logged and swallowed,
    since temp-dir cleanup must never abort the analysis pipeline.

    Args:
        path: directory to delete
    """
    try:
        shutil.rmtree(path)
    except Exception as e:
        logger.error(f"shutil.rmtree error: {e}, dir: {path}")
def submit_workflow_run(change_id, options):
    """
    Run the full analysis pipeline for one change.

    Fetches the patch, splits it into per-file/per-function request blocks,
    sends each block through the Dify workflow, and merges the per-block
    findings into a single result dict.

    Args:
        change_id (str): change id
        options (dict): detection options; 'new_h_flag' and 'result_dir'
            are filled in here for each request

    Returns:
        dict: {'success': bool, 'message'?: str, 'result': dict | None}.
        ``success`` is False only for a missing patch, a null request list,
        or a workflow timeout; other failures are reported as success with
        ``result`` None.
    """
    logger.info(f"options: {options}")
    # Fetch the patch for this change.
    patch = get_change_patch(change_id)
    if patch is None:
        logger.error("patch is null")
        return {'success': False, 'message': "None", 'result': None}
    
    # Split the modified content into request-sized segments.
    req_list = parse_code_change(patch, change_id)

    # A null request list signals an internal error.
    if req_list is None:
        return {'success': False, 'message': "None", 'result': None}

    # An empty request list simply means there is nothing to analyse.
    if not req_list:
        logger.info("nothing to analyse")
        return {'success': True, 'message': "nothing to analyse", 'result': None}
    
    # Accumulator for the final merged findings.
    check_result = dict()

    # Send each segment through the workflow, one at a time.
    for res, new_header_flag in req_list:

        # Per-request result directory (timestamp + change_id + uuid to
        # avoid collisions).
        result_dir = f'{int(time.time())}_{change_id}_{uuid.uuid4().hex}'
        result_path = TEMP_FILE_DIR / result_dir

        # Defensive check: should never already exist, but verify anyway.
        if result_path.exists():
            logger.error(f'result_path: {result_path} exists')
            continue
        result_path.mkdir()
        result_path.chmod(0o777)

        # Fill in the per-request options.
        options['new_h_flag'] = 1 if new_header_flag else 0
        options['result_dir'] = result_dir

        # Invoke the workflow; blocks until it completes.
        output = workflow_run(res, options)

        # Handle abnormal workflow returns.
        if output is None:
            logger.error(f"workflow_run return:{output}")
            safe_rmtree(result_path)
            return {'success': True, 'message': "workflow return error", 'result': None}
        if output == 'workflow time out':
            safe_rmtree(result_path)
            return {'success': False, 'message': "workflow time out"}
        
        # Process every result file the workflow wrote into the temp dir.
        result_files = [p for p in result_path.iterdir() if p.is_file()]
        # Findings for this single request.
        single_result = dict()
        try:
            for file in result_files:
                node = json.loads(file.read_text())
                logger.info(node)
                # Merge into this request's container first...
                merge_results(single_result, node)
            # ...then fold into the overall container.
            merge_results(check_result, single_result)
        except Exception as e:
            logger.error(f"merge error: {e}")
        # Clean up the temp directory.
        safe_rmtree(result_path)
    if not check_result:
        return {'success': True, 'message': "There is no bug in this change.", 'result': None}
    return {'success': True, 'result': check_result}


@app.route('/change_info', methods=['POST'])
def get_change_info():
    """
    POST /change_info — look up a Gerrit change by its change-id.

    Form field: ``submitId``.
    Responses:
        200 {"success": true,  "message": <gerrit data>}
        400 {"success": false, "message": "change_id is null"}
    """
    change_id = request.form.get('submitId')
    if change_id is None:
        return jsonify({'success': False, 'message': "change_id is null"}), 400
    logger.info(f'Received Gerrit Change-id: {change_id}')
    return jsonify({'success': True, 'message': get_change_info_from_gerrit(change_id)}), 200


@app.route('/account_info', methods=['POST'])
def get_account_info():
    """
    POST /account_info — look up a Gerrit account by its id.

    Form field: ``account_id``.
    Responses:
        200 {"success": true,  "message": <account data>}
        400 {"success": false, "message": "account_id is null"}
    """
    account_id = request.form.get('account_id')
    if account_id is None:
        return jsonify({'success': False, 'message': "account_id is null"}), 400
    logger.info(f'Received account_id: {account_id}')
    return jsonify({'success': True, 'message': get_account_info_from_gerrit(account_id)}), 200



@app.route('/analyse', methods=['POST'])
def analyse():
    """
    Trigger the code-analysis workflow for a Gerrit change and return findings.

    Route: POST /analyse
    Form params:
        submitId: str  - Gerrit change-id.
        options : str  - JSON string controlling the detection rules.
    Responses:
        200 -> {"success": true,  "message": <findings or empty finding set>}
        400 -> {"success": false, "message": <error>} on missing/invalid input
        404 -> {"success": false, "message": "workflow time out"}
    """
    change_id = request.form.get('submitId')
    if change_id is None:
        return jsonify({'success': False, 'message': "change_id is null"}), 400
    options = request.form.get('options')
    if options is None:
        return jsonify({'success': False, 'message': "options is null"}), 400
    try:
        # Client-supplied string: guard the parse so malformed JSON yields a
        # 400 instead of an unhandled JSONDecodeError (HTTP 500).
        options = json.loads(options)
    except json.JSONDecodeError:
        return jsonify({'success': False, 'message': "options is not valid JSON"}), 400
    logger.info(f"receive change_id: {change_id}, options: {options}")

    # Single definition of the "no findings" payload (was duplicated inline).
    empty_result = {
        'critical_issues': [],
        'high_risk_issues': [],
        'code_quality_issues': []
    }

    result = submit_workflow_run(change_id, options)
    if result['success'] is False:
        if result['message'] == 'workflow time out':
            return jsonify({'success': False, 'message': 'workflow time out'}), 404
        # Other failures degrade gracefully to an empty finding set.
        return jsonify({'success': True, 'message': empty_result}), 200
    if result['result'] is None:
        # Workflow completed but produced no findings (or a benign message).
        logger.warning(f"变更{change_id}处理结果为{result['message']}")
        return jsonify({'success': True, 'message': empty_result}), 200
    return jsonify({'success': True, 'message': result['result']}), 200


@app.route('/analyse_test', methods=['POST'])
def analyse_test():
    """
    Test endpoint: run one analysis workflow with a caller-supplied prompt.

    Route: POST /analyse_test
    Form params:
        submitId: str - Gerrit change-id.
        prompt  : str - custom LLM/rule prompt to use for this run.
    Responses:
        200 -> {"success": true,  "message": <findings or empty finding set>}
        400 -> {"success": false, "message": <error>} on missing input
        404 -> {"success": false, "message": "workflow time out"}
    """
    change_id = request.form.get('submitId')
    if change_id is None:
        return jsonify({'success': False, 'message': "change_id is null"}), 400
    prompt = request.form.get('prompt')
    if prompt is None:
        return jsonify({'success': False, 'message': "prompt is null"}), 400

    options = {'prompt': prompt}

    # Single definition of the "no findings" payload (was duplicated inline).
    empty_result = {
        'critical_issues': [],
        'high_risk_issues': [],
        'code_quality_issues': []
    }

    result = submit_workflow_run(change_id, options)
    if result['success'] is False:
        if result['message'] == 'workflow time out':
            return jsonify({'success': False, 'message': 'workflow time out'}), 404
        # Other failures degrade gracefully to an empty finding set.
        return jsonify({'success': True, 'message': empty_result}), 200
    if result['result'] is None:
        # Workflow completed but produced no findings (or a benign message).
        logger.warning(f"变更{change_id}处理结果为{result['message']}")
        return jsonify({'success': True, 'message': empty_result}), 200
    return jsonify({'success': True, 'message': result['result']}), 200


if __name__ == '__main__':
    try:
        # Load configuration before anything that needs credentials.
        app_config.ensure_loaded()
        auth = HTTPBasicAuth(app_config.username, app_config.password)  # HTTP basic-auth credentials for Gerrit
        gerrit_client = GerritRestAPI(url=app_config.gerrit_url + '/', auth=auth)  # module-global client for the Gerrit REST API

        # Initialize the background scheduler (periodic jobs).
        init_scheduler()
        
        fetch_all_repositories()
        # use_reloader=False keeps Flask from forking a second process that
        # would duplicate the scheduler's background jobs.
        app.run(debug=False, host='0.0.0.0', port=7070, use_reloader=False)
    except (KeyboardInterrupt, SystemExit):
        # NOTE(review): `scheduler` is assumed to be a module-level global set up
        # by init_scheduler(); if startup fails before that call, this line would
        # raise NameError — confirm against the rest of the file.
        scheduler.shutdown()