"""
数据血缘相关API端点
"""

import json
import time
from datetime import datetime
from typing import List, Optional

from fastapi import APIRouter, Depends, HTTPException, Query, Body
from sqlalchemy.ext.asyncio import AsyncSession

from app.core.database import get_db
from app.utils.timezone_utils import get_shanghai_now
from app.core.neo4j_client import neo4j_client
from app.services.lineage_sync_service import lineage_sync_service
from app.services.lineage_analyzer import LineageAnalyzerService
from app.services.sql_parser import SQLParserService
from app.services.lineage_storage_service import lineage_storage_service
from app.models.lineage import ParseTask, LineageNodeCache, LineageEdgeCache
from app.schemas.lineage import (
    LineageAnalysisRequest,
    LineageAnalysisResponse,
    DataLineageSchema,
    LineageGraphData,
    LineageStatistics,
    DatabaseConnectionTest,
    BatchAnalysisRequest,
    BatchAnalysisResponse,
)
from app.schemas.common import DataResponse
from app.core.logger import logger

# Router for all data-lineage endpoints (mounted by the application factory).
router = APIRouter()
# Module-level service singletons shared by every request handler below.
lineage_service = LineageAnalyzerService()

sql_parser_service = SQLParserService()


def build_task_level_lineage(task_lineage_results: List[dict]) -> tuple:
    """Build the task-level lineage graph.

    Collapses the per-task table/connector lineage graphs into a graph
    whose nodes are the tasks themselves and whose edges are inter-task
    dependencies, inferred from (1) tables one task writes and another
    reads, and (2) connectors (Kafka topics, CDC tables, ...) one task
    produces and another consumes.

    Args:
        task_lineage_results: One dict per task with keys 'task_id',
            'task_name', 'nodes', 'edges', 'node_count' and 'edge_count';
            each node dict carries at least 'id', 'label' and 'type'.

    Returns:
        Tuple ``(task_nodes, task_edges)`` — both lists of plain dicts
        ready for the frontend graph renderer.
    """
    task_nodes = []
    task_edges = []

    # Create task nodes and collect producer/consumer info per connector.
    connector_producers = {}  # "connector_type:connector_id" -> [task_ids] (tasks writing to the connector)
    connector_consumers = {}  # "connector_type:connector_id" -> [task_ids] (tasks reading from the connector)

    # Supported connector node types.
    connector_types = [
        'kafka_topic', 'upsert_kafka_topic', 'sqlserver_cdc_table', 'pulsar_topic',
        'redis_key', 'elasticsearch_index', 'hbase_table', 'mongodb_collection',
        'hdfs_path', 's3_path'
    ]

    for task_result in task_lineage_results:
        # Collect the task's input/output tables and its connectors.
        source_tables = [node['label'] for node in task_result['nodes'] if node['type'] == 'source_table']
        target_tables = [node['label'] for node in task_result['nodes'] if node['type'] == 'target_table']

        # Collect connectors of every supported type.
        task_connectors = {}
        for connector_type in connector_types:
            task_connectors[connector_type] = [node['label'] for node in task_result['nodes'] if node['type'] == connector_type]

        # Classify each connector as read-from or written-to by this task.
        task_connector_read = {}  # connector_type -> [connector_ids]
        task_connector_write = {}  # connector_type -> [connector_ids]

        for connector_type in connector_types:
            task_connector_read[connector_type] = []
            task_connector_write[connector_type] = []

        # Use the direction of internal edges to decide read vs. write.
        for edge in task_result['edges']:
            source_node = next((n for n in task_result['nodes'] if n['id'] == edge['source']), None)
            target_node = next((n for n in task_result['nodes'] if n['id'] == edge['target']), None)

            if source_node and target_node:
                # Table -> connector edge: the task writes to the connector.
                if (source_node['type'] in ['source_table', 'target_table'] and
                    target_node['type'] in connector_types):
                    task_connector_write[target_node['type']].append(target_node['label'])

                # Connector -> table edge: the task reads from the connector.
                elif (source_node['type'] in connector_types and
                      target_node['type'] in ['source_table', 'target_table']):
                    task_connector_read[source_node['type']].append(source_node['label'])

        # If no edge made the direction explicit, infer it from node types.
        for connector_type in connector_types:
            connectors = task_connectors[connector_type]
            if not task_connector_read[connector_type] and not task_connector_write[connector_type] and connectors:
                # Heuristic: source tables + connectors => likely writes to the connector;
                # connectors + target tables => likely reads from the connector.
                if source_tables and connectors:
                    task_connector_write[connector_type].extend(connectors)
                elif connectors and target_tables:
                    task_connector_read[connector_type].extend(connectors)

        # Record producers and consumers for every connector type.
        for connector_type in connector_types:
            for connector_id in task_connector_write[connector_type]:
                connector_key = f"{connector_type}:{connector_id}"
                if connector_key not in connector_producers:
                    connector_producers[connector_key] = []
                connector_producers[connector_key].append(task_result['task_id'])

            for connector_id in task_connector_read[connector_type]:
                connector_key = f"{connector_type}:{connector_id}"
                if connector_key not in connector_consumers:
                    connector_consumers[connector_key] = []
                connector_consumers[connector_key].append(task_result['task_id'])

        task_node = {
            'id': f"task_{task_result['task_id']}",
            'label': task_result['task_name'],
            'type': 'task',
            'task_id': task_result['task_id'],
            'task_name': task_result['task_name'],
            'node_count': task_result['node_count'],
            'edge_count': task_result['edge_count'],
            'source_tables': source_tables,
            'target_tables': target_tables,
            'connectors': task_connectors,
            'connector_read': task_connector_read,
            'connector_write': task_connector_write,
            'internal_nodes': task_result['nodes'],
            'internal_edges': task_result['edges']
        }
        task_nodes.append(task_node)

    # Build inter-task lineage.
    # 1. Dependencies based on shared tables (A writes what B reads).
    for i, task_a in enumerate(task_lineage_results):
        for j, task_b in enumerate(task_lineage_results):
            if i != j:  # distinct tasks only
                task_a_outputs = [node['label'] for node in task_a['nodes'] if node['type'] == 'target_table']
                task_b_inputs = [node['label'] for node in task_b['nodes'] if node['type'] == 'source_table']

                # Find tables they have in common.
                common_tables = set(task_a_outputs) & set(task_b_inputs)

                if common_tables:
                    # Create the inter-task edge.
                    edge = {
                        'source': f"task_{task_a['task_id']}",
                        'target': f"task_{task_b['task_id']}",
                        'type': 'task_dependency',
                        'dependency_type': 'shared_table',
                        'common_tables': list(common_tables),
                        'source_task_name': task_a['task_name'],
                        'target_task_name': task_b['task_name']
                    }

                    # Skip duplicate edges.
                    edge_exists = any(
                        e['source'] == edge['source'] and e['target'] == edge['target']
                        for e in task_edges
                    )
                    if not edge_exists:
                        task_edges.append(edge)

    # 2. Dependencies based on shared connectors.
    for connector_key, producers in connector_producers.items():
        consumers = connector_consumers.get(connector_key, [])

        # Split the key back into connector type and id.
        connector_type, connector_id = connector_key.split(':', 1)

        # Create a dependency for every producer/consumer pair.
        for producer_task_id in producers:
            for consumer_task_id in consumers:
                if producer_task_id != consumer_task_id:
                    producer_task = next((t for t in task_lineage_results if t['task_id'] == producer_task_id), None)
                    consumer_task = next((t for t in task_lineage_results if t['task_id'] == consumer_task_id), None)

                    if producer_task and consumer_task:
                        # Human-readable name derived from the connector type.
                        display_name = f"{connector_type.replace('_', ' ').title()}: {connector_id}"

                        edge = {
                            'source': f"task_{producer_task_id}",
                            'target': f"task_{consumer_task_id}",
                            'type': 'task_dependency',
                            'dependency_type': connector_type,
                            'common_tables': [display_name],
                            'connector_key': connector_key,
                            'connector_type': connector_type,
                            'connector_id': connector_id,
                            'source_task_name': producer_task['task_name'],
                            'target_task_name': consumer_task['task_name']
                        }

                        # Skip duplicate edges.
                        edge_exists = any(
                            e['source'] == edge['source'] and e['target'] == edge['target']
                            for e in task_edges
                        )
                        if not edge_exists:
                            task_edges.append(edge)
                            logger.info(f"发现 {connector_type} 依赖: {producer_task['task_name']} -> {consumer_task['task_name']} ({display_name})")

    # If no real dependency was found, create a few demo edges so the UI
    # still has something to render.
    if len(task_edges) == 0 and len(task_nodes) > 1:
        logger.info("没有找到真实的任务依赖关系，创建示例依赖关系用于演示")

        # Chain the first few tasks with synthetic dependencies.
        # NOTE(review): demo edges carry no 'dependency_type' key, so the
        # summary log below prints "unknown: unknown" for them.
        for i in range(min(3, len(task_nodes) - 1)):
            source_task = task_nodes[i]
            target_task = task_nodes[i + 1]

            demo_edge = {
                'source': source_task['id'],
                'target': target_task['id'],
                'type': 'task_dependency',
                'common_tables': ['demo_shared_table'],
                'source_task_name': source_task['task_name'],
                'target_task_name': target_task['task_name']
            }
            task_edges.append(demo_edge)
            logger.info(f"创建示例依赖: {source_task['task_name']} -> {target_task['task_name']}")

    logger.info(f"构建任务级血缘关系完成: {len(task_nodes)} 个任务节点, {len(task_edges)} 个任务依赖关系")
    logger.info(f"连接器生产者: {dict(connector_producers)}")
    logger.info(f"连接器消费者: {dict(connector_consumers)}")

    for edge in task_edges:
        dependency_info = f"共享表: {edge['common_tables']}"
        if edge.get('dependency_type') != 'shared_table':
            dependency_info = f"{edge.get('dependency_type', 'unknown')}: {edge.get('connector_id', 'unknown')}"
        logger.info(f"任务依赖: {edge['source_task_name']} -> {edge['target_task_name']} ({dependency_info})")

    return task_nodes, task_edges


@router.post("/analyze", response_model=DataResponse[LineageAnalysisResponse])
async def analyze_task_lineage(
    request: LineageAnalysisRequest = Body(...),
    db: AsyncSession = Depends(get_db)
):
    """分析任务的数据血缘"""
    start_time = time.time()
    
    try:
        # 检查任务是否存在
        task = await task_service.get_task_by_id(db, request.task_id)
        if not task:
            raise HTTPException(status_code=404, detail="任务不存在")
        
        # 执行血缘分析
        lineage = await lineage_service.analyze_task_lineage(
            db, 
            request.task_id, 
            request.force_refresh
        )
        
        processing_time = time.time() - start_time
        
        if lineage:
            response = LineageAnalysisResponse(
                task_id=request.task_id,
                task_name=task.name,
                analysis_success=True,
                lineage=lineage,
                processing_time=processing_time
            )
            message = "数据血缘分析成功"
        else:
            response = LineageAnalysisResponse(
                task_id=request.task_id,
                task_name=task.name,
                analysis_success=False,
                error_message="SQL语句解析失败或无有效的表依赖关系",
                processing_time=processing_time
            )
            message = "数据血缘分析失败"
        
        return DataResponse(
            data=response,
            message=message
        )
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"血缘分析API错误: {str(e)}")
        processing_time = time.time() - start_time
        
        response = LineageAnalysisResponse(
            task_id=request.task_id,
            task_name="未知任务",
            analysis_success=False,
            error_message=str(e),
            processing_time=processing_time
        )
        
        return DataResponse(
            data=response,
            message="数据血缘分析失败",
            code=500,
            success=False
        )


@router.get("/test-connection", response_model=DataResponse[DatabaseConnectionTest])
async def test_database_connection(
    db: AsyncSession = Depends(get_db)
):
    """测试数据库连接"""
    start_time = time.time()

    try:
        # 执行简单查询测试连接
        from sqlalchemy import text

        # 测试基本连接
        result = await db.execute(text("SELECT 1"))
        result.scalar()

        # 获取任务统计信息
        task_count_result = await db.execute(text("SELECT COUNT(*) FROM dinky_task"))
        total_tasks = task_count_result.scalar()

        enabled_tasks_result = await db.execute(text("SELECT COUNT(*) FROM dinky_task WHERE enabled = 1"))
        enabled_tasks = enabled_tasks_result.scalar()

        with_sql_result = await db.execute(text("SELECT COUNT(*) FROM dinky_task WHERE statement IS NOT NULL AND statement != '' AND enabled = 1"))
        tasks_with_sql = with_sql_result.scalar()

        connection_time = time.time() - start_time

        test_result = DatabaseConnectionTest(
            success=True,
            message="数据库连接成功",
            connection_time=connection_time,
            database_info={
                "host": "localhost",
                "database": "dinky",
                "total_tasks": total_tasks,
                "enabled_tasks": enabled_tasks,
                "tasks_with_sql": tasks_with_sql
            }
        )

        return DataResponse(
            data=test_result,
            message="数据库连接测试成功"
        )

    except Exception as e:
        connection_time = time.time() - start_time
        logger.error(f"数据库连接测试失败: {str(e)}")

        test_result = DatabaseConnectionTest(
            success=False,
            message=f"数据库连接失败: {str(e)}",
            connection_time=connection_time,
            error_details=str(e)
        )

        return DataResponse(
            data=test_result,
            message="数据库连接测试失败"
        )


# ==========================================
# Cached-lineage data APIs
# (must be registered BEFORE the /{task_id} route, or FastAPI would
#  match these static paths as a task_id path parameter)
# ==========================================

@router.get("/cached-lineage", response_model=DataResponse[dict])
async def get_cached_lineage_data(
    db: AsyncSession = Depends(get_db)
):
    """
    获取已缓存的血缘数据
    从MySQL缓存中获取最新的血缘解析结果，用于页面初始化显示
    """
    try:
        logger.info("获取已缓存的血缘数据...")

        # 获取最新的解析任务
        from sqlalchemy import text, desc

        # 查询最新的成功解析任务
        tasks_result = await db.execute(text("""
            SELECT id, task_name, dinky_task_id, parse_result, nodes_count, edges_count, updated_at
            FROM dp_parse_tasks
            WHERE parse_status = 'SUCCESS' AND parse_result IS NOT NULL
            ORDER BY updated_at DESC
            LIMIT 50
        """))

        tasks = tasks_result.fetchall()
        logger.info(f"找到 {len(tasks)} 个成功的解析任务")

        if not tasks:
            logger.warning("没有找到任何成功的解析任务")
            return DataResponse(
                data={
                    "total_tasks": 0,
                    "nodes": [],
                    "edges": [],
                    "connectors": [],
                    "task_level_nodes": [],
                    "task_level_edges": [],
                    "statistics": {
                        "total_nodes": 0,
                        "total_edges": 0,
                        "total_connectors": 0,
                        "task_level_nodes": 0,
                        "task_level_edges": 0,
                        "last_update": None
                    }
                },
                message="暂无缓存的血缘数据"
            )

        # 获取所有表级节点缓存（排除任务节点，与解析API保持一致）
        nodes_result = await db.execute(text("""
            SELECT node_id, node_type, node_name, table_name, node_properties, updated_at
            FROM dp_lineage_nodes_cache
            WHERE is_active = 1 AND node_type != 'TASK'
            ORDER BY updated_at DESC
        """))

        nodes_cache = nodes_result.fetchall()
        logger.info(f"找到 {len(nodes_cache)} 个缓存表节点")

        # 获取所有表级边缓存（task_id不为NULL表示表级边，与解析API保持一致）
        edges_result = await db.execute(text("""
            SELECT edge_id, source_node_id, target_node_id, relationship_type, edge_properties, updated_at
            FROM dp_lineage_edges_cache
            WHERE is_active = 1 AND task_id IS NOT NULL
            ORDER BY updated_at DESC
        """))

        edges_cache = edges_result.fetchall()
        logger.info(f"找到 {len(edges_cache)} 个缓存表级边")

        # 构建节点数据（表级节点本身就是完整的节点数据）
        all_nodes = []
        for node in nodes_cache:
            try:
                node_properties = node[4] if node[4] else {}  # node_properties

                # 如果node_properties是字符串，尝试解析为JSON
                if isinstance(node_properties, str):
                    try:
                        node_properties = json.loads(node_properties)
                    except Exception as e:
                        logger.warning(f"解析节点属性JSON失败: {e}, 节点ID: {node[0]}")
                        node_properties = {}

                # 调试：记录节点属性信息
                node_type = node_properties.get('type', 'table')
                if node_type == 'table':
                    logger.debug(f"节点 {node[0]} 使用默认类型，属性键: {list(node_properties.keys()) if isinstance(node_properties, dict) else 'not dict'}")

                # 直接使用表级节点的数据
                node_data = {
                    'id': node[0],  # node_id
                    'label': node_properties.get('label', node[2] or node[0]),  # 优先使用属性中的label
                    'type': node_type,  # 使用属性中的类型
                    'table_name': node_properties.get('table_name', node[3] or node[2] or node[0]),
                    'properties': node_properties.get('properties', {}),
                    'updated_at': node[5].isoformat() if node[5] else None
                }
                all_nodes.append(node_data)

            except Exception as e:
                logger.warning(f"处理节点缓存失败: {e}, 节点数据: {node}")
                continue

        # 构建边数据（统一使用data_transformation类型，与解析API保持一致）
        all_edges = []
        for edge in edges_cache:
            try:
                edge_data = {
                    'id': edge[0],  # edge_id
                    'source': edge[1],  # source_node_id
                    'target': edge[2],  # target_node_id
                    'type': 'data_transformation',  # 统一使用data_transformation类型
                    'properties': edge[4] if edge[4] else {},  # edge_properties
                    'updated_at': edge[5].isoformat() if edge[5] else None
                }
                all_edges.append(edge_data)
            except Exception as e:
                logger.warning(f"处理边缓存失败: {e}, 边数据: {edge}")
                continue

        # 构建任务级别数据
        task_lineage_results = []
        task_level_nodes = []
        all_connectors = set()

        for task in tasks:
            task_id, task_name, dinky_task_id, parse_result, nodes_count, edges_count, updated_at = task
            logger.info(f"处理任务 {dinky_task_id}: {task_name}")

            if parse_result:
                # 如果parse_result是字符串，尝试解析为JSON
                if isinstance(parse_result, str):
                    try:
                        import json
                        parse_result = json.loads(parse_result)
                        logger.info(f"任务 {dinky_task_id} 的parse_result JSON解析成功")
                    except (json.JSONDecodeError, TypeError) as e:
                        logger.warning(f"任务 {dinky_task_id} 的parse_result无法解析为JSON: {e}")
                        continue

                # 确保parse_result是字典
                if not isinstance(parse_result, dict):
                    logger.warning(f"任务 {dinky_task_id} 的parse_result不是字典类型: {type(parse_result)}")
                    continue

                # 检查parse_result的内容
                result_nodes = parse_result.get('nodes', [])
                result_edges = parse_result.get('edges', [])
                logger.info(f"任务 {dinky_task_id} 包含 {len(result_nodes)} 个节点, {len(result_edges)} 条边")

                # 添加到任务血缘结果
                task_lineage_results.append({
                    'task_id': dinky_task_id,
                    'task_name': task_name,
                    'nodes': result_nodes,
                    'edges': result_edges,
                    'node_count': nodes_count or len(result_nodes),
                    'edge_count': edges_count or len(result_edges),
                    'updated_at': updated_at.isoformat() if updated_at else None
                })

                # 构建任务级别节点
                task_node = {
                    'id': f"task_{dinky_task_id}",
                    'label': task_name,
                    'type': 'task',
                    'task_id': dinky_task_id,
                    'task_name': task_name,
                    'node_count': nodes_count or len(result_nodes),
                    'edge_count': edges_count or len(result_edges),
                    'source_tables': parse_result.get('source_tables', []),
                    'target_tables': parse_result.get('target_tables', []),
                    'internal_nodes': result_nodes,
                    'internal_edges': result_edges,
                    'updated_at': updated_at.isoformat() if updated_at else None
                }
                task_level_nodes.append(task_node)
                logger.info(f"添加任务级节点: {task_name} (内部节点: {len(result_nodes)}, 内部边: {len(result_edges)})")

                # 收集连接器（使用与解析API相同的逻辑）
                task_connectors = parse_result.get('connectors', [])
                if isinstance(task_connectors, list):
                    all_connectors.update(task_connectors)
                    logger.info(f"任务 {dinky_task_id} 添加连接器: {len(task_connectors)} 个")
            else:
                logger.warning(f"任务 {dinky_task_id} 没有parse_result数据")

        # 构建任务级别边（基于共享表的依赖关系）
        task_level_edges = []

        # 首先尝试从缓存中获取任务级别的边（task_id为NULL且是真正的任务到任务依赖）
        task_edges_result = await db.execute(text("""
            SELECT edge_id, source_node_id, target_node_id, relationship_type, edge_properties, updated_at
            FROM dp_lineage_edges_cache
            WHERE task_id IS NULL
            AND source_node_id LIKE 'task_%'
            AND target_node_id LIKE 'task_%'
            AND edge_properties LIKE '%task_dependency%'
            ORDER BY updated_at DESC
        """))

        cached_task_edges = task_edges_result.fetchall()

        if cached_task_edges:
            # 使用缓存的任务级别边
            for edge in cached_task_edges:
                edge_data = {
                    'id': edge[0],  # edge_id
                    'source': edge[1],  # source_node_id
                    'target': edge[2],  # target_node_id
                    'type': 'task_dependency',
                    'relationship_type': edge[3].lower() if edge[3] else 'flows_to',
                    'properties': edge[4] if edge[4] else {},
                    'updated_at': edge[5].isoformat() if edge[5] else None
                }

                # 从properties中提取任务依赖信息
                properties = edge_data.get('properties', {})
                if isinstance(properties, dict):
                    edge_data.update({
                        'dependency_type': properties.get('dependency_type', 'shared_table'),
                        'common_tables': properties.get('common_tables', []),
                        'source_task_name': properties.get('source_task_name', ''),
                        'target_task_name': properties.get('target_task_name', ''),
                        'connector_key': properties.get('connector_key', ''),
                        'connector_type': properties.get('connector_type', ''),
                        'connector_id': properties.get('connector_id', '')
                    })

                task_level_edges.append(edge_data)

            logger.info(f"从缓存获取了 {len(task_level_edges)} 条任务级别依赖关系")

        # 如果缓存中没有任务级别边，则重新分析
        elif len(task_lineage_results) > 1:
            # 收集所有表的生产者和消费者
            table_producers = {}  # table_name -> task_id
            table_consumers = {}  # table_name -> [task_ids]
            connector_producers = {}  # connector_name -> task_id
            connector_consumers = {}  # connector_name -> [task_ids]

            # 分析每个任务的表和连接器
            for task_result in task_lineage_results:
                task_id = task_result['task_id']
                nodes = task_result.get('nodes', [])

                # 从节点中提取表信息
                for node in nodes:
                    table_name = node.get('table_name', node.get('label', ''))
                    node_type = node.get('type', '')

                    if node_type == 'target_table':
                        # 目标表（生产者）
                        table_producers[table_name] = task_id
                    elif node_type == 'source_table':
                        # 源表（消费者）
                        if table_name not in table_consumers:
                            table_consumers[table_name] = []
                        table_consumers[table_name].append(task_id)

                    # 分析连接器信息
                    with_options = node.get('with_options', {})
                    if with_options:
                        connector_type = with_options.get('connector', '')
                        if connector_type:
                            # 构建连接器标识
                            if connector_type == 'upsert-kafka':
                                topic = with_options.get('topic', '')
                                if topic:
                                    connector_key = f"kafka_topic:{topic}"
                                    if node_type == 'target_table':
                                        connector_producers[connector_key] = task_id
                                    else:
                                        if connector_key not in connector_consumers:
                                            connector_consumers[connector_key] = []
                                        connector_consumers[connector_key].append(task_id)
                            elif connector_type == 'sqlserver-cdc':
                                table_name_cdc = with_options.get('table-name', '')
                                if table_name_cdc:
                                    connector_key = f"sqlserver_cdc:{table_name_cdc}"
                                    if connector_key not in connector_consumers:
                                        connector_consumers[connector_key] = []
                                    connector_consumers[connector_key].append(task_id)
                            elif connector_type == 'doris':
                                table_identifier = with_options.get('table.identifier', '')
                                if table_identifier:
                                    connector_key = f"doris_table:{table_identifier}"
                                    connector_producers[connector_key] = task_id
                            else:
                                # 其他连接器类型
                                connector_key = f"{connector_type}:{table_name}"
                                if node_type == 'target_table':
                                    connector_producers[connector_key] = task_id
                                else:
                                    if connector_key not in connector_consumers:
                                        connector_consumers[connector_key] = []
                                    connector_consumers[connector_key].append(task_id)

            # 基于共享表建立任务依赖关系
            for table_name, producer_task in table_producers.items():
                if table_name in table_consumers:
                    for consumer_task in table_consumers[table_name]:
                        if producer_task != consumer_task:
                            # 检查是否已存在这个连接
                            edge_exists = any(
                                edge.get('source') == f"task_{producer_task}" and
                                edge.get('target') == f"task_{consumer_task}"
                                for edge in task_level_edges
                            )

                            if not edge_exists:
                                # 获取任务名称
                                producer_task_name = next((t['task_name'] for t in task_lineage_results if t['task_id'] == producer_task), f'Task {producer_task}')
                                consumer_task_name = next((t['task_name'] for t in task_lineage_results if t['task_id'] == consumer_task), f'Task {consumer_task}')

                                task_level_edges.append({
                                    'source': f"task_{producer_task}",
                                    'target': f"task_{consumer_task}",
                                    'type': 'task_dependency',
                                    'dependency_type': 'shared_table',
                                    'common_tables': [table_name],
                                    'source_task_name': producer_task_name,
                                    'target_task_name': consumer_task_name
                                })
                                logger.info(f"发现任务依赖: {producer_task_name} -> {consumer_task_name} (共享表: {table_name})")

            # 基于连接器建立任务依赖关系
            for connector_name, producer_task in connector_producers.items():
                if connector_name in connector_consumers:
                    for consumer_task in connector_consumers[connector_name]:
                        if producer_task != consumer_task:
                            # 检查是否已存在这个连接
                            edge_exists = any(
                                edge.get('source') == f"task_{producer_task}" and
                                edge.get('target') == f"task_{consumer_task}"
                                for edge in task_level_edges
                            )

                            if not edge_exists:
                                # 获取任务名称
                                producer_task_name = next((t['task_name'] for t in task_lineage_results if t['task_id'] == producer_task), f'Task {producer_task}')
                                consumer_task_name = next((t['task_name'] for t in task_lineage_results if t['task_id'] == consumer_task), f'Task {consumer_task}')

                                # 确定连接器类型
                                connector_type = 'kafka_topic' if 'kafka' in connector_name.lower() else 'shared_connector'

                                task_level_edges.append({
                                    'source': f"task_{producer_task}",
                                    'target': f"task_{consumer_task}",
                                    'type': 'task_dependency',
                                    'dependency_type': connector_type,
                                    'common_tables': [],
                                    'connector_key': connector_name,
                                    'connector_type': connector_type,
                                    'connector_id': connector_name,
                                    'source_task_name': producer_task_name,
                                    'target_task_name': consumer_task_name
                                })
                                logger.info(f"发现任务依赖: {producer_task_name} -> {consumer_task_name} (共享连接器: {connector_name})")

        logger.info(f"构建了 {len(task_level_edges)} 条任务级别依赖关系")

        # 获取最新更新时间
        last_update = tasks[0][6].isoformat() if tasks and tasks[0][6] else None

        # 计算准确的统计信息
        actual_stats = {
            "total_tasks": len(tasks),
            "total_nodes": len(all_nodes),
            "total_edges": len(all_edges),
            "total_connectors": len(all_connectors),
            "task_level_nodes": len(task_level_nodes),
            "task_level_edges": len(task_level_edges),
            "tasks_processed": len(tasks),
            "last_update": last_update,
            "processing_time": 0.0  # 缓存数据无处理时间
        }

        result = {
            "total_tasks": actual_stats["total_tasks"],
            "task_lineage_results": task_lineage_results,
            "task_level_nodes": task_level_nodes,
            "task_level_edges": task_level_edges,
            "nodes": all_nodes,
            "edges": all_edges,
            "connectors": list(all_connectors),
            "statistics": actual_stats
        }

        # 数据一致性验证日志
        logger.info(f"📊 缓存数据统计验证:")
        logger.info(f"  - 任务数: {actual_stats['total_tasks']} (实际数组长度: {len(tasks)})")
        logger.info(f"  - 任务级节点: {actual_stats['task_level_nodes']} (实际数组长度: {len(task_level_nodes)})")
        logger.info(f"  - 任务级连接: {actual_stats['task_level_edges']} (实际数组长度: {len(task_level_edges)})")
        logger.info(f"  - 所有节点: {actual_stats['total_nodes']} (实际数组长度: {len(all_nodes)})")
        logger.info(f"  - 所有连接: {actual_stats['total_edges']} (实际数组长度: {len(all_edges)})")
        logger.info(f"  - 连接器: {actual_stats['total_connectors']} (实际数组长度: {len(all_connectors)})")

        logger.info(f"获取缓存血缘数据成功: {len(tasks)}个任务, {len(all_nodes)}个节点, {len(all_edges)}条边, {len(task_level_nodes)}个任务级节点, {len(task_level_edges)}条任务级边")

        return DataResponse(
            data=result,
            message=f"成功获取 {len(tasks)} 个任务的缓存血缘数据，包含 {len(task_level_nodes)} 个任务级节点和 {len(task_level_edges)} 条任务依赖关系"
        )

    except Exception as e:
        logger.error(f"获取缓存血缘数据失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取缓存血缘数据失败: {str(e)}")


@router.post("/refresh-cache", response_model=DataResponse[dict])
async def refresh_lineage_cache(
    db: AsyncSession = Depends(get_db)
):
    """
    Refresh the lineage data cache.

    Re-parses every task via ``parse_database_flinksql`` and reports whether
    the refresh succeeded, forwarding the parse payload to the caller.
    """
    try:
        logger.info("开始刷新血缘数据缓存...")

        # Delegate the heavy lifting to the database-parsing endpoint.
        parse_result = await parse_database_flinksql(db)

        if not parse_result.success:
            # Surface the parse failure inside the response envelope
            # (no exception raised), keeping code=500 as the contract.
            return DataResponse(
                data={
                    "cache_refreshed": False,
                    "error": parse_result.message
                },
                message="血缘数据缓存刷新失败",
                success=False,
                code=500
            )

        return DataResponse(
            data={
                "cache_refreshed": True,
                "parse_result": parse_result.data
            },
            message="血缘数据缓存刷新成功"
        )

    except Exception as e:
        logger.error(f"刷新血缘数据缓存失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"刷新血缘数据缓存失败: {str(e)}")


@router.get("/verify-cache", response_model=DataResponse[dict])
async def verify_cache_integrity(
    db: AsyncSession = Depends(get_db)
):
    """
    Verify the integrity of the lineage cache tables.

    Delegates to ``lineage_storage_service.verify_cache_integrity`` and wraps
    its outcome in the standard response envelope.
    """
    try:
        logger.info("开始验证缓存数据完整性...")

        check = await lineage_storage_service.verify_cache_integrity(db)

        if not check['success']:
            # Report the failure in-band with code=500, mirroring the
            # service's error field.
            return DataResponse(
                data={"error": check.get('error')},
                message="缓存完整性验证失败",
                success=False,
                code=500
            )

        return DataResponse(
            data=check['data'],
            message="缓存完整性验证完成"
        )

    except Exception as e:
        logger.error(f"验证缓存完整性失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"验证缓存完整性失败: {str(e)}")


@router.post("/rebuild-cache", response_model=DataResponse[dict])
async def rebuild_cache(
    db: AsyncSession = Depends(get_db)
):
    """
    Force-rebuild the lineage cache.

    Step 1 re-parses the source database for the freshest lineage data;
    step 2 validates the parse statistics and reports the rebuilt counts.
    """
    try:
        logger.info("开始强制重建缓存数据...")

        # Step 1: re-parse the database to obtain the latest lineage data.
        logger.info("步骤1: 重新解析数据库获取最新数据...")

        # Reuse the parse endpoint defined in this module.
        parse_result = await parse_database_flinksql(db)

        if not parse_result.success:
            logger.error(f"数据库解析失败: {parse_result.message}")
            return DataResponse(
                data={"error": parse_result.message},
                message="缓存重建失败：数据库解析失败",
                success=False,
                code=500
            )

        logger.info("数据库解析完成，获取到最新数据")

        # Step 2: validate the parse result and extract headline counts.
        payload = parse_result.data
        stats = payload.get('statistics', {})
        node_total = stats.get('total_nodes', 0)
        edge_total = stats.get('total_edges', 0)

        logger.info(f"解析结果统计: {node_total} 个节点, {edge_total} 条边")

        return DataResponse(
            data={
                "nodes_count": node_total,
                "edges_count": edge_total,
                "rebuild_time": get_shanghai_now().isoformat(),
                "message": "缓存重建完成，数据已同步到最新状态",
                "parse_statistics": payload.get('statistics', {})
            },
            message=f"缓存重建完成: {node_total} 个节点, {edge_total} 条边"
        )

    except Exception as e:
        logger.error(f"重建缓存数据失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"重建缓存数据失败: {str(e)}")


@router.get("/{task_id}", response_model=DataResponse[DataLineageSchema])
async def get_task_lineage(
    task_id: int,
    db: AsyncSession = Depends(get_db)
):
    """Return the stored data-lineage record for a single task.

    Responds 404 when the task does not exist or has not been analyzed yet.
    """
    try:
        # NOTE(review): `task_service` is not among this module's visible
        # imports — confirm it is defined/imported elsewhere in the file.
        task = await task_service.get_task_by_id(db, task_id)
        if not task:
            raise HTTPException(status_code=404, detail="任务不存在")

        # Fetch the previously computed lineage; analysis must run first.
        lineage = await lineage_service._get_existing_lineage(db, task_id)
        if not lineage:
            raise HTTPException(status_code=404, detail="该任务尚未进行血缘分析，请先执行分析")

        return DataResponse(
            data=lineage,
            message="获取数据血缘信息成功"
        )

    except HTTPException:
        # Re-raise deliberate HTTP errors (404s above) untouched.
        raise
    except Exception as e:
        logger.error(f"获取血缘信息API错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取数据血缘信息失败: {str(e)}")


@router.get("/{task_id}/graph", response_model=DataResponse[LineageGraphData])
async def get_lineage_graph_data(
    task_id: int,
    db: AsyncSession = Depends(get_db)
):
    """Return the lineage graph (nodes + edges) formatted for frontend rendering."""
    try:
        lineage = await lineage_service._get_existing_lineage(db, task_id)

        if not lineage:
            raise HTTPException(status_code=404, detail="该任务尚未进行血缘分析")

        # Map stored nodes onto the frontend graph-node shape.
        nodes = [
            {
                "id": node.id,
                "label": node.table,
                "type": node.type,
                "data": {
                    "database": node.database,
                    "table": node.table,
                    "full_name": node.full_name,
                    "columns": node.columns,
                    "properties": node.properties
                },
                # Fall back to the origin when no layout position was stored.
                "position": node.position or {"x": 0, "y": 0},
                "style": {
                    "width": 150,
                    "height": 60
                }
            }
            for node in lineage.nodes
        ]

        # Map stored edges onto the frontend graph-edge shape.
        edges = [
            {
                "id": edge.id,
                "source": edge.source,
                "target": edge.target,
                "type": edge.type,
                "data": {
                    "column_mapping": edge.column_mapping,
                    "properties": edge.properties
                },
                "style": {
                    "stroke": "#1890ff",
                    "strokeWidth": 2
                }
            }
            for edge in lineage.edges
        ]

        return DataResponse(
            data=LineageGraphData(nodes=nodes, edges=edges, layout="dagre"),
            message="获取血缘图数据成功"
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取血缘图数据API错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取血缘图数据失败: {str(e)}")


@router.get("/statistics/overview", response_model=DataResponse[LineageStatistics])
async def get_lineage_statistics(
    db: AsyncSession = Depends(get_db)
):
    """Return overall lineage statistics.

    NOTE: real aggregation queries are not implemented yet; this endpoint
    currently returns an all-zero placeholder, matching the original stub.
    """
    try:
        placeholder = LineageStatistics(
            total_tasks=0,
            analyzed_tasks=0,
            total_nodes=0,
            total_edges=0,
            table_count=0,
            view_count=0,
            temp_table_count=0
        )

        return DataResponse(
            data=placeholder,
            message="获取血缘统计信息成功"
        )

    except Exception as e:
        logger.error(f"获取血缘统计信息API错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取血缘统计信息失败: {str(e)}")


@router.post("/analyze-all", response_model=DataResponse[BatchAnalysisResponse])
async def analyze_all_tasks(
    request: BatchAnalysisRequest = Body(...),
    db: AsyncSession = Depends(get_db)
):
    """Analyze data lineage for every parseable task in one batch.

    Selects all successfully parsed tasks carrying SQL content, runs the
    lineage analyzer on each, and returns per-task success/failure details.
    """
    started_at = time.time()

    try:
        from sqlalchemy import text

        # dinky_task was removed; the parse-task table is now the task source.
        rows = (await db.execute(text("""
            SELECT id, task_name as name FROM dp_parse_tasks
            WHERE sql_content IS NOT NULL
            AND sql_content != ''
            AND parse_status = 'SUCCESS'
            ORDER BY id
        """))).fetchall()

        if not rows:
            return DataResponse(
                data=BatchAnalysisResponse(
                    total_tasks=0,
                    success_count=0,
                    failed_count=0,
                    processing_time=time.time() - started_at,
                    results=[]
                ),
                message="没有找到需要分析的任务"
            )

        outcomes = []
        ok = 0
        bad = 0

        for row in rows:
            tid, tname = row[0], row[1]

            try:
                # Analyze one task; a falsy result means no usable lineage.
                lineage = await lineage_service.analyze_task_lineage(
                    db,
                    tid,
                    request.force_refresh
                )

                if lineage:
                    ok += 1
                    outcomes.append({
                        "task_id": tid,
                        "task_name": tname,
                        "success": True,
                        "node_count": lineage.node_count,
                        "edge_count": lineage.edge_count
                    })
                else:
                    bad += 1
                    outcomes.append({
                        "task_id": tid,
                        "task_name": tname,
                        "success": False,
                        "error": "SQL解析失败或无有效依赖关系"
                    })

            except Exception as e:
                # A single failing task must not abort the batch.
                bad += 1
                outcomes.append({
                    "task_id": tid,
                    "task_name": tname,
                    "success": False,
                    "error": str(e)
                })

        return DataResponse(
            data=BatchAnalysisResponse(
                total_tasks=len(rows),
                success_count=ok,
                failed_count=bad,
                processing_time=time.time() - started_at,
                results=outcomes
            ),
            message=f"批量分析完成，成功: {ok}, 失败: {bad}"
        )

    except Exception as e:
        logger.error(f"批量分析API错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"批量分析失败: {str(e)}")


@router.delete("/{task_id}", response_model=DataResponse[dict])
async def delete_task_lineage(
    task_id: int,
    db: AsyncSession = Depends(get_db)
):
    """Delete a task's lineage record.

    NOTE: actual deletion is not implemented yet; this endpoint currently
    acknowledges the request unconditionally, matching the original stub.
    """
    try:
        return DataResponse(
            data={"task_id": task_id, "deleted": True},
            message="删除血缘信息成功"
        )

    except Exception as e:
        logger.error(f"删除血缘信息API错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"删除血缘信息失败: {str(e)}")


@router.post("/parse-database", response_model=DataResponse[dict])
async def parse_database_flinksql(
    db: AsyncSession = Depends(get_db)
):
    """Parse all FlinkSQL statements in the Dinky database and build the lineage graph.

    Workflow:
      1. Read every enabled task with a non-empty ``statement`` from the
         ``dinky_task`` table through a synchronous engine built from
         ``settings.DINKY_DATABASE_URL``.
      2. Run task-level lineage analysis over the whole task set at once.
      3. Re-parse each task independently to produce table-level nodes and
         edges (kept for compatibility with existing consumers).
      4. Persist the combined result via ``lineage_storage_service``.

    Returns a DataResponse whose ``data`` bundles task-level and table-level
    nodes, edges, connectors and statistics. On any exception an empty graph
    is returned with ``success=False`` and ``code=500`` instead of raising,
    so callers such as refresh/rebuild can inspect the failure message.
    """
    start_time = time.time()

    try:
        logger.info("开始解析数据库中的FlinkSQL语句...")

        # Fetch all enabled tasks that carry a SQL statement.
        from sqlalchemy import text, create_engine
        from app.core.config import settings

        # Use the Dinky database through a synchronous connection.
        dinky_engine = create_engine(settings.DINKY_DATABASE_URL)

        with dinky_engine.connect() as dinky_conn:
            tasks_result = dinky_conn.execute(text("""
                SELECT id, name, statement
                FROM dinky_task
                WHERE statement IS NOT NULL
                AND statement != ''
                AND enabled = 1
                ORDER BY id
            """))

            tasks = tasks_result.fetchall()

            if not tasks:
                return DataResponse(
                    data={
                        "total_tasks": 0,
                        "nodes": [],
                        "edges": [],
                        "connectors": [],
                        "statistics": {"error": "没有找到需要解析的SQL语句"}
                    },
                    message="没有找到需要解析的SQL语句"
                )

            # Build task-id -> SQL and task-id -> name lookup maps.
            task_sql_map = {}
            task_name_map = {}
            for task_id, name, statement in tasks:
                task_sql_map[task_id] = statement
                task_name_map[task_id] = name

            # Run the newer task-level lineage analysis over all tasks.
            logger.info(f"开始任务级别血缘分析，共 {len(task_sql_map)} 个任务")
            task_level_result = sql_parser_service.build_task_level_lineage_graph(task_sql_map, task_name_map)
            logger.info(f"任务级别血缘分析完成，发现 {len(task_level_result.get('task_level_edges', []))} 条任务连接")

            # Parse each task's SQL independently (kept for compatibility).
            # NOTE(review): this list may already be pre-populated by the
            # task-level analysis, and per-task results are appended again
            # below — confirm entries are not duplicated downstream.
            task_lineage_results = task_level_result.get('task_lineage_results', [])
            all_nodes = []
            all_edges = []
            all_connectors = set()

            for task_row in tasks:
                task_id, task_name, sql_statement = task_row

                logger.info(f"解析任务 {task_id}: {task_name}")

                # Parse this task in isolation.
                task_result = sql_parser_service.parse_flink_sql_advanced(sql_statement)
                # Attach the raw SQL for downstream column-level parsing.
                task_result['sql'] = sql_statement

                # Build internal nodes (including column info) for this task.
                task_nodes = sql_parser_service._build_internal_nodes(task_result)
                task_edges = []

                # Re-key node ids with a per-task prefix so the same table
                # name in different tasks yields distinct graph nodes.
                node_id_map = {}
                for node in task_nodes:
                    # Prefix the node id with the owning task.
                    original_table = node['label']
                    new_node_id = f"task_{task_id}_{original_table}"
                    node_id_map[original_table] = new_node_id
                    node['id'] = new_node_id
                    node['task_id'] = task_id
                    node['task_name'] = task_name
                    all_nodes.append(node)

                # Create edges only between tables within the same task.
                for target_table in task_result.get('target_tables', []):
                    for source_table in task_result.get('source_tables', []):
                        if (source_table in node_id_map and target_table in node_id_map and
                            source_table != target_table):
                            edge = {
                                'source': node_id_map[source_table],
                                'target': node_id_map[target_table],
                                'type': 'data_transformation',
                                'task_id': task_id,
                                'task_name': task_name
                            }
                            task_edges.append(edge)
                            all_edges.append(edge)

                # Collect connector identifiers used by this task.
                task_connectors = task_result.get('connectors', [])
                all_connectors.update(task_connectors)

                # Record this task's parse result.
                task_lineage_results.append({
                    'task_id': task_id,
                    'task_name': task_name,
                    'nodes': task_nodes,
                    'edges': task_edges,
                    'connectors': task_connectors,
                    'source_tables': task_result.get('source_tables', []),
                    'target_tables': task_result.get('target_tables', []),
                    'sql_length': len(sql_statement),
                    'node_count': len(task_nodes),
                    'edge_count': len(task_edges)
                })

            logger.info(f"解析完成，共处理 {len(tasks)} 个任务")

            processing_time = time.time() - start_time

            # Pull task-level nodes/edges out of the task-level analysis.
            task_level_nodes = task_level_result.get('task_level_nodes', [])
            task_level_edges = task_level_result.get('task_level_edges', [])

            # Refresh each task node's display name and internal graph data.
            task_lineage_map = {result['task_id']: result for result in task_lineage_results}

            for node in task_level_nodes:
                task_id = node.get('task_id')
                if task_id in task_name_map:
                    node['task_name'] = task_name_map[task_id]

                # Fix internal edge data: use the actual per-task edges
                # computed above in task_lineage_results.
                if task_id in task_lineage_map:
                    lineage_result = task_lineage_map[task_id]
                    node['internal_edges'] = lineage_result.get('edges', [])
                    node['edge_count'] = len(lineage_result.get('edges', []))
                    # Keep internal node data current as well.
                    node['internal_nodes'] = lineage_result.get('nodes', [])
                    node['node_count'] = len(lineage_result.get('nodes', []))

            logger.info(f"最终结果: {len(task_level_nodes)} 个任务节点, {len(task_level_edges)} 条任务连接")

            # Assemble the final result payload.
            result = {
                "total_tasks": len(tasks),
                "task_lineage_results": task_lineage_results,
                "task_level_nodes": task_level_nodes,
                "task_level_edges": task_level_edges,
                "nodes": all_nodes,
                "edges": all_edges,
                "connectors": list(all_connectors),
                "statistics": {
                    "total_nodes": len(all_nodes),
                    "total_edges": len(all_edges),
                    "total_connectors": len(all_connectors),
                    "task_level_nodes": len(task_level_nodes),
                    "task_level_edges": len(task_level_edges),
                    "processing_time": processing_time,
                    "tasks_processed": len(tasks)
                }
            }

            logger.info(f"FlinkSQL解析完成，耗时: {processing_time:.2f}秒")

            # Persist the parse result to MySQL and Neo4j.
            logger.info("开始保存解析结果到数据库...")
            storage_result = await lineage_storage_service.save_parse_results(db, result)

            if storage_result.get('success'):
                logger.info("解析结果保存成功")
                result['storage_result'] = storage_result
                message = f"成功解析 {len(tasks)} 个任务的FlinkSQL语句，生成数据血缘图并保存到数据库"
            else:
                # Parsing succeeded but persistence failed — still return the
                # parsed graph, with the storage problem in the message.
                logger.warning(f"解析结果保存失败: {storage_result.get('message')}")
                result['storage_result'] = storage_result
                message = f"成功解析 {len(tasks)} 个任务的FlinkSQL语句，但保存到数据库时出现问题: {storage_result.get('message')}"

            return DataResponse(
                data=result,
                message=message
            )

    except Exception as e:
        processing_time = time.time() - start_time
        logger.error(f"解析数据库FlinkSQL失败: {str(e)}")

        # Swallow the error into a structured failure response (no raise).
        return DataResponse(
            data={
                "total_tasks": 0,
                "nodes": [],
                "edges": [],
                "connectors": [],
                "statistics": {
                    "error": str(e),
                    "processing_time": processing_time
                }
            },
            message=f"解析FlinkSQL失败: {str(e)}",
            code=500,
            success=False
        )


# ==========================================
# Neo4j 血缘图相关API
# ==========================================

@router.get("/neo4j/graph", summary="获取Neo4j血缘图数据")
async def get_neo4j_lineage_graph(
    table_id: Optional[str] = Query(None, description="表ID，为空时返回全部"),
    limit: int = Query(100, ge=1, le=1000, description="返回节点数量限制"),
    use_cache: bool = Query(True, description="是否使用缓存"),
    db: AsyncSession = Depends(get_db)
):
    """
    Fetch the data-lineage graph from Neo4j.

    - **table_id**: when provided, restrict the graph to that table's lineage
    - **limit**: maximum number of nodes to return
    - **use_cache**: prefer cached data over a live Neo4j query
    """
    try:
        if use_cache:
            # Try the cache first; fall through to Neo4j when it is empty.
            result = await _get_lineage_from_cache(table_id, limit, db)
            if result['nodes'] or result['edges']:
                return DataResponse(
                    data=result,
                    message="从缓存获取血缘图成功",
                    success=True
                )

        # Cache miss (or bypassed): query Neo4j directly.
        if not neo4j_client.is_connected():
            return DataResponse(
                data={"nodes": [], "edges": [], "stats": {"node_count": 0, "edge_count": 0}},
                message="Neo4j服务不可用，请检查连接",
                success=False,
                code=503
            )

        # The scoped and full-graph queries use the same client call; a falsy
        # table_id (None or "") means "no table filter". This collapses the
        # previous duplicated if/else branches that made identical calls.
        result = neo4j_client.get_table_lineage_graph(table_id or None, limit)

        return DataResponse(
            data=result,
            message="从Neo4j获取血缘图成功",
            success=True
        )

    except Exception as e:
        # Return an empty graph with the error instead of raising, so the
        # frontend always receives a renderable payload.
        logger.error(f"获取Neo4j血缘图失败: {str(e)}")
        return DataResponse(
            data={"nodes": [], "edges": [], "stats": {"node_count": 0, "edge_count": 0}},
            message=f"获取血缘图失败: {str(e)}",
            success=False,
            code=500
        )


@router.get("/neo4j/upstream/{table_id}", summary="获取上游血缘")
async def get_neo4j_upstream_lineage(
    table_id: str,
    depth: int = Query(3, ge=1, le=10, description="查询深度")
):
    """Return the upstream lineage of the given table, up to ``depth`` hops."""
    try:
        if not neo4j_client.is_connected():
            # Neo4j is down: report unavailability instead of raising.
            return DataResponse(
                data=[],
                message="Neo4j服务不可用",
                success=False,
                code=503
            )

        lineage = neo4j_client.get_lineage_upstream(table_id, depth)

        payload = {
            "lineage": lineage,
            "table_id": table_id,
            "direction": "upstream",
            "depth": depth
        }
        return DataResponse(
            data=payload,
            message="获取上游血缘成功",
            success=True
        )

    except Exception as e:
        logger.error(f"获取上游血缘失败: {str(e)}")
        return DataResponse(
            data=[],
            message=f"获取上游血缘失败: {str(e)}",
            success=False,
            code=500
        )


@router.get("/neo4j/downstream/{table_id}", summary="获取下游血缘")
async def get_neo4j_downstream_lineage(
    table_id: str,
    depth: int = Query(3, ge=1, le=10, description="查询深度")
):
    """Return the downstream lineage of the given table, up to ``depth`` hops."""
    try:
        if not neo4j_client.is_connected():
            # Neo4j is down: report unavailability instead of raising.
            return DataResponse(
                data=[],
                message="Neo4j服务不可用",
                success=False,
                code=503
            )

        lineage = neo4j_client.get_lineage_downstream(table_id, depth)

        payload = {
            "lineage": lineage,
            "table_id": table_id,
            "direction": "downstream",
            "depth": depth
        }
        return DataResponse(
            data=payload,
            message="获取下游血缘成功",
            success=True
        )

    except Exception as e:
        logger.error(f"获取下游血缘失败: {str(e)}")
        return DataResponse(
            data=[],
            message=f"获取下游血缘失败: {str(e)}",
            success=False,
            code=500
        )


@router.post("/sync/{task_id}", summary="同步任务血缘到Neo4j")
async def sync_task_lineage_to_neo4j(
    task_id: int,
    force_full: bool = Query(False, description="是否强制全量同步"),
    db: AsyncSession = Depends(get_db)
):
    """Push a task's lineage data into Neo4j.

    NOTE(review): ``lineage_sync_service`` uses synchronous operations on the
    session passed in — migrating the service to async is recommended.
    """
    try:
        outcome = lineage_sync_service.sync_task_lineage(task_id, db, force_full)

        if outcome['success']:
            return DataResponse(
                data=outcome,
                message=outcome['message'],
                success=True
            )

        # The service reported a handled failure: propagate as a 400.
        return DataResponse(
            data=outcome,
            message=outcome['message'],
            success=False,
            code=400
        )

    except Exception as e:
        logger.error(f"同步血缘失败: {str(e)}")
        return DataResponse(
            data={},
            message=f"同步血缘失败: {str(e)}",
            success=False,
            code=500
        )


@router.get("/neo4j/statistics", summary="获取Neo4j统计信息")
async def get_neo4j_statistics(db: AsyncSession = Depends(get_db)):
    """Return lineage-graph statistics from Neo4j plus MySQL placeholders."""
    try:
        # Neo4j-side statistics, only when the connection is alive.
        graph_stats = neo4j_client.get_statistics() if neo4j_client.is_connected() else {}

        # MySQL-side statistics are not implemented yet (async queries needed);
        # zeros are returned as placeholders.
        db_stats = {
            "parse_tasks": 0,
            "cached_nodes": 0,
            "cached_edges": 0
        }

        return DataResponse(
            data={
                "neo4j": graph_stats,
                "mysql": db_stats,
                "neo4j_connected": neo4j_client.is_connected()
            },
            message="获取统计信息成功",
            success=True
        )

    except Exception as e:
        logger.error(f"获取统计信息失败: {str(e)}")
        return DataResponse(
            data={},
            message=f"获取统计信息失败: {str(e)}",
            success=False,
            code=500
        )


async def _get_lineage_from_cache(table_id: Optional[str], limit: int, db: AsyncSession) -> dict:
    """Read lineage data from the cache tables.

    NOTE: the async cache query is not implemented yet; this stub always
    returns an empty graph, which callers treat as a cache miss.
    """
    try:
        # TODO: query LineageNodeCache / LineageEdgeCache asynchronously.
        return {
            "nodes": [],
            "edges": [],
            "stats": {"node_count": 0, "edge_count": 0}
        }
    except Exception as e:
        logger.error(f"从缓存获取血缘数据失败: {str(e)}")
        return {"nodes": [], "edges": [], "stats": {"node_count": 0, "edge_count": 0}}
