"""
数据血缘存储服务
负责将解析的血缘数据保存到MySQL和Neo4j
"""

import hashlib
import json
from typing import Dict, List, Any, Optional
from datetime import datetime
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, delete, func, text
from sqlalchemy.dialects.mysql import insert

from app.core.database import get_db
from app.core.neo4j_client import neo4j_client
from app.core.logger import logger
from app.models.lineage import (
    ParseTask, LineageSyncLog,
    LineageNodeCache, LineageEdgeCache,
    TaskType, ParseStatus, SyncType, OperationType, SyncStatus,
    NodeType, RelationshipType
)
from app.utils.timezone_utils import get_shanghai_now


class LineageStorageService:
    """血缘数据存储服务类"""
    
    def __init__(self):
        # Reuse the module-level Neo4j client singleton for all graph writes.
        self.neo4j_client = neo4j_client
    
    async def save_parse_results(self, db: AsyncSession, parse_results: Dict) -> Dict:
        """Persist parsed lineage data to MySQL and Neo4j.

        Each MySQL step commits immediately so that a later failure (e.g. in
        the Neo4j step) cannot roll back data that was already saved.

        Args:
            db: Async SQLAlchemy session bound to the MySQL database.
            parse_results: Parser output with task results plus global and
                task-level nodes/edges.

        Returns:
            Dict with an overall ``success`` flag, the per-store results and
            a ``details`` breakdown. Overall success requires BOTH the MySQL
            and the Neo4j steps to succeed; the sync-log step is best-effort.
        """
        mysql_result = None
        neo4j_result = None
        sync_result = None

        try:
            logger.info("开始保存解析结果到数据库...")

            # Step 1: drop all previously cached nodes/edges and commit right
            # away so the cache rebuild below starts from a clean slate.
            logger.info("步骤1: 清理旧的缓存数据...")
            await self._clear_old_cache(db)
            await db.commit()  # commit the cleanup immediately
            logger.info("旧缓存数据清理完成并已提交")

            # Step 2: upsert per-task parse metadata into MySQL.
            logger.info("步骤2: 保存任务信息到MySQL...")
            mysql_result = await self._save_to_mysql(db, parse_results)
            await db.commit()  # commit the task records immediately
            logger.info("任务信息保存完成并已提交")

            # Step 3: rebuild the global node and edge cache tables.
            logger.info("步骤3: 保存全局节点和边缓存...")
            await self._save_global_cache(db, parse_results)
            await db.commit()  # commit the cache data immediately
            logger.info("缓存数据保存完成并已提交")

            # Step 4: write the lineage graph to Neo4j. Deliberately isolated
            # so a Neo4j failure never affects the already-committed MySQL
            # data; the failure is captured in neo4j_result instead.
            logger.info("步骤4: 保存血缘图到Neo4j...")
            try:
                neo4j_result = await self._save_to_neo4j(parse_results)
                logger.info("Neo4j数据保存完成")
            except Exception as neo4j_error:
                logger.error(f"Neo4j保存失败，但MySQL数据已保存: {str(neo4j_error)}")
                neo4j_result = {
                    'success': False,
                    'message': f'Neo4j保存失败: {str(neo4j_error)}',
                    'error': str(neo4j_error)
                }

            # Step 5: record a sync-log row. Best-effort: a failure here is
            # reported in sync_result but does not fail the whole operation.
            logger.info("步骤5: 记录同步日志...")
            try:
                sync_result = await self._record_sync_log(db, parse_results, mysql_result, neo4j_result)
                await db.commit()  # commit the log record
                logger.info("同步日志记录完成")
            except Exception as log_error:
                logger.error(f"同步日志记录失败: {str(log_error)}")
                sync_result = {
                    'success': False,
                    'message': f'日志记录失败: {str(log_error)}'
                }

            logger.info("解析结果保存流程完成")

            # Overall success requires both MySQL and Neo4j to have succeeded;
            # the sync-log outcome only shows up in 'details'.
            overall_success = (
                mysql_result and mysql_result.get('success', False) and
                neo4j_result and neo4j_result.get('success', False)
            )

            return {
                'success': overall_success,
                'message': '数据保存完成' if overall_success else '数据保存部分成功',
                'mysql_result': mysql_result,
                'neo4j_result': neo4j_result,
                'sync_result': sync_result,
                'details': {
                    'mysql_success': mysql_result.get('success', False) if mysql_result else False,
                    'neo4j_success': neo4j_result.get('success', False) if neo4j_result else False,
                    'sync_success': sync_result.get('success', False) if sync_result else False
                }
            }

        except Exception as e:
            logger.error(f"保存解析结果失败: {str(e)}")
            # Intentionally no rollback: steps that already committed must
            # keep their data even when a later step fails.
            return {
                'success': False,
                'message': f'数据保存失败: {str(e)}',
                'error': str(e),
                'mysql_result': mysql_result,
                'neo4j_result': neo4j_result,
                'sync_result': sync_result
            }
    
    async def _save_to_mysql(self, db: AsyncSession, parse_results: Dict) -> Dict:
        """Upsert per-task parse results into MySQL.

        For each entry in ``parse_results['task_lineage_results']`` an
        existing ParseTask row (matched on dinky_task_id) is updated, or a
        new one is created. Commits once at the end.

        Returns:
            Dict with ``success``, the list of ``saved_tasks`` and
            ``total_tasks``.

        Raises:
            Propagates any database error; rollback is left to the caller.
        """
        try:
            logger.info("保存数据到MySQL...")

            saved_tasks = []

            # Iterate over each task's parse result.
            for task_result in parse_results.get('task_lineage_results', []):
                task_id = task_result['task_id']
                task_name = task_result['task_name']

                # Build the complete parse payload (connector info plus
                # summary statistics) stored in the parse_result column.
                complete_result = {
                    **task_result,
                    'connectors': task_result.get('connectors', []),
                    'source_tables': task_result.get('source_tables', []),
                    'target_tables': task_result.get('target_tables', []),
                    'statistics': {
                        'node_count': task_result.get('node_count', 0),
                        'edge_count': task_result.get('edge_count', 0),
                        'connector_count': len(task_result.get('connectors', []))
                    }
                }

                # Look up an existing task by its Dinky task id.
                existing_task = await db.execute(
                    select(ParseTask).where(ParseTask.dinky_task_id == task_id)
                )
                task = existing_task.scalar_one_or_none()

                if task:
                    # Update the existing task in place.
                    task.task_name = task_name
                    task.parse_status = ParseStatus.SUCCESS
                    task.parse_result = complete_result
                    task.nodes_count = task_result.get('node_count', 0)
                    task.edges_count = task_result.get('edge_count', 0)
                    task.updated_at = get_shanghai_now()
                else:
                    # Create a new task record.
                    task = ParseTask(
                        task_name=task_name,
                        dinky_task_id=task_id,
                        task_type=TaskType.INCREMENTAL,
                        parse_status=ParseStatus.SUCCESS,
                        parse_result=complete_result,
                        nodes_count=task_result.get('node_count', 0),
                        edges_count=task_result.get('edge_count', 0),
                        created_at=get_shanghai_now(),
                        updated_at=get_shanghai_now()
                    )
                    db.add(task)

                await db.flush()  # flush so task.id is populated before use
                saved_tasks.append({
                    'task_id': task.id,
                    'dinky_task_id': task_id,
                    'task_name': task_name,
                    'nodes_count': task.nodes_count,
                    'edges_count': task.edges_count
                })

            await db.commit()

            logger.info(f"MySQL保存完成，共保存 {len(saved_tasks)} 个任务")

            return {
                'success': True,
                'saved_tasks': saved_tasks,
                'total_tasks': len(saved_tasks)
            }

        except Exception as e:
            logger.error(f"MySQL保存失败: {str(e)}")
            # Do not roll back here; the caller decides how to handle it.
            raise

    async def _clear_old_cache(self, db: AsyncSession):
        """Delete all cached lineage nodes and edges.

        Edges are removed BEFORE nodes: edge rows reference node ids, so
        deleting the referencing side first avoids the foreign-key /
        constraint failures that previously could leave the cleanup
        incomplete (the post-check below used to warn about exactly that).

        Raises:
            Propagates any database error to the caller.
        """
        try:
            logger.info("清理旧的缓存数据...")

            # Delete the old edge cache first (including task-level edges) so
            # no edge still references a node row when nodes are removed.
            result_edges = await db.execute(delete(LineageEdgeCache))
            deleted_edges = result_edges.rowcount
            logger.info(f"删除了 {deleted_edges} 个旧的边缓存（包括任务级边）")

            # Then delete the old node cache.
            result_nodes = await db.execute(delete(LineageNodeCache))
            deleted_nodes = result_nodes.rowcount
            logger.info(f"删除了 {deleted_nodes} 个旧的节点缓存")

            # Verify the cleanup actually emptied both tables.
            remaining_nodes_result = await db.execute(select(func.count(LineageNodeCache.id)))
            remaining_nodes_count = remaining_nodes_result.scalar()

            remaining_edges_result = await db.execute(select(func.count(LineageEdgeCache.id)))
            remaining_edges_count = remaining_edges_result.scalar()

            logger.info(f"清理后剩余: 节点={remaining_nodes_count}, 边={remaining_edges_count}")

            if remaining_nodes_count > 0 or remaining_edges_count > 0:
                logger.warning("缓存清理不完整，可能存在约束问题")

            logger.info("旧缓存数据清理完成")

        except Exception as e:
            logger.error(f"清理缓存失败: {str(e)}")
            raise

    async def _save_global_cache(self, db: AsyncSession, parse_results: Dict):
        """Save global node and edge caches, validating edge consistency.

        Nodes are written first and their ids collected, so that every edge
        can be checked against the set of actually-persisted node ids before
        it is stored.

        Raises:
            Propagates any error from the underlying save helpers.
        """
        try:
            logger.info("保存全局缓存数据...")

            # Collect every persisted node id to validate edge endpoints.
            all_node_ids = set()

            # 1. Save the global node cache.
            all_nodes = parse_results.get('nodes', [])
            if all_nodes:
                logger.info(f"保存 {len(all_nodes)} 个全局节点到缓存")
                saved_node_ids = await self._save_node_cache_with_validation(db, 0, all_nodes)
                all_node_ids.update(saved_node_ids)

            # 2. Save the task-level node cache.
            task_level_nodes = parse_results.get('task_level_nodes', [])
            if task_level_nodes:
                logger.info(f"保存 {len(task_level_nodes)} 个任务级别节点到缓存")
                saved_task_node_ids = await self._save_node_cache_with_validation(db, 0, task_level_nodes)
                all_node_ids.update(saved_task_node_ids)

            # 3. Save the global (table-level) edge cache; these edges carry a
            #    resolved task_id.
            all_edges = parse_results.get('edges', [])
            if all_edges:
                logger.info(f"验证并保存 {len(all_edges)} 条表级边到缓存")
                await self._save_table_edge_cache_with_validation(db, all_edges, all_node_ids)

            # 4. Save the task-level edge cache; these edges use task_id=NULL.
            task_level_edges = parse_results.get('task_level_edges', [])
            if task_level_edges:
                logger.info(f"验证并保存 {len(task_level_edges)} 条任务级别边到缓存")
                await self._save_task_edge_cache_with_validation(db, task_level_edges, all_node_ids)

            logger.info("全局缓存数据保存完成")

        except Exception as e:
            logger.error(f"保存全局缓存失败: {str(e)}")
            raise

    async def _save_node_cache(self, db: AsyncSession, task_id: int, nodes: List[Dict]):
        """Upsert node cache rows via MySQL INSERT ... ON DUPLICATE KEY UPDATE.

        Args:
            db: Async session used for the upserts (no commit here).
            task_id: Recorded as last_updated_task_id on every row.
            nodes: Raw node dicts from the parser; non-dict entries are skipped.
        """
        try:
            saved_count = 0
            for node in nodes:
                # Only dict payloads can be cached.
                if not isinstance(node, dict):
                    logger.warning(f"跳过无效节点: {node}")
                    continue

                # Derive a display label, falling back through candidate keys.
                node_label = (node.get('label')
                              or node.get('name')
                              or node.get('id')
                              or f'node_{hash(str(node))}')
                node_id = node.get('id', node_label)

                # Everything except an explicit TASK node is cached as TABLE
                # (SOURCE_TABLE / TARGET_TABLE / TEMP_TABLE and unknown types
                # all map to TABLE).
                raw_type = node.get('type', 'table').upper()
                node_type = NodeType.TASK if raw_type == 'TASK' else NodeType.TABLE

                # MySQL upsert: insert, or refresh the row on duplicate key.
                upsert = insert(LineageNodeCache).values(
                    node_id=node_id,
                    node_type=node_type,
                    node_name=node_label,
                    table_name=node.get('table_name', node_label),
                    node_properties=node,
                    last_updated_task_id=task_id,
                    created_at=get_shanghai_now(),
                    updated_at=get_shanghai_now()
                )
                upsert = upsert.on_duplicate_key_update(
                    node_name=upsert.inserted.node_name,
                    table_name=upsert.inserted.table_name,
                    node_properties=upsert.inserted.node_properties,
                    last_updated_task_id=upsert.inserted.last_updated_task_id,
                    updated_at=upsert.inserted.updated_at
                )

                await db.execute(upsert)
                saved_count += 1

            logger.info(f"成功保存 {saved_count} 个节点缓存")

        except Exception as e:
            logger.error(f"保存节点缓存失败: {str(e)}")
            raise

    async def _save_node_cache_with_validation(self, db: AsyncSession, task_id: int, nodes: List[Dict]) -> set:
        """Upsert node cache rows and return the set of saved node ids.

        Args:
            db: Async session used for the upserts (no commit here).
            task_id: Recorded as last_updated_task_id on every row.
            nodes: Raw node dicts from the parser; invalid entries are skipped.

        Returns:
            Set of node ids that were actually persisted, used by callers to
            validate edge endpoints.
        """
        try:
            saved_count = 0
            saved_node_ids = set()

            # Batch-upsert node rows with INSERT ... ON DUPLICATE KEY UPDATE.
            for node in nodes:
                # Only dict payloads can be cached.
                if not isinstance(node, dict):
                    logger.warning(f"跳过无效节点: {node}")
                    continue

                # Derive a label, falling back through candidate keys.
                node_label = node.get('label') or node.get('name') or node.get('id') or f'node_{hash(str(node))}'
                node_id = node.get('id', node_label)

                # Reject missing/blank ids. Guard strip() with isinstance so a
                # non-string id (e.g. an int) no longer raises AttributeError.
                if not node_id or (isinstance(node_id, str) and not node_id.strip()):
                    logger.warning(f"跳过空节点ID的节点: {node}")
                    continue

                # Map the node type string: SOURCE/TARGET/TEMP tables (and any
                # unknown type) become TABLE, explicit TASK becomes TASK.
                node_type_str = node.get('type', 'table').upper()
                if node_type_str in ('SOURCE_TABLE', 'TARGET_TABLE', 'TEMP_TABLE'):
                    node_type = NodeType.TABLE
                elif node_type_str == 'TASK':
                    node_type = NodeType.TASK
                else:
                    node_type = NodeType.TABLE  # default: table node

                # MySQL upsert: insert, or refresh the row on duplicate key.
                stmt = insert(LineageNodeCache).values(
                    node_id=node_id,
                    node_type=node_type,
                    node_name=node_label,
                    table_name=node.get('table_name', node_label),
                    node_properties=node,
                    last_updated_task_id=task_id,
                    created_at=get_shanghai_now(),
                    updated_at=get_shanghai_now()
                )

                stmt = stmt.on_duplicate_key_update(
                    node_name=stmt.inserted.node_name,
                    table_name=stmt.inserted.table_name,
                    node_properties=stmt.inserted.node_properties,
                    last_updated_task_id=stmt.inserted.last_updated_task_id,
                    updated_at=stmt.inserted.updated_at
                )

                await db.execute(stmt)
                saved_count += 1
                saved_node_ids.add(node_id)
                logger.debug(f"保存节点缓存: {node_id} ({node_type_str})")

            logger.info(f"成功保存 {saved_count} 个节点缓存，节点ID: {len(saved_node_ids)} 个")
            return saved_node_ids

        except Exception as e:
            logger.error(f"保存节点缓存失败: {str(e)}")
            raise
    
    async def _save_edge_cache(self, db: AsyncSession, task_id: int, edges: List[Dict]):
        """Upsert edge cache rows for a specific task.

        Edges missing a source or target id are skipped — consistent with the
        other edge-cache writers in this class — instead of being stored with
        fabricated 'unknown_source'/'unknown_target' endpoints, which created
        orphan edges referencing nodes that never exist.

        Args:
            db: Async session used for the upserts (no commit here).
            task_id: Stored on every edge row.
            edges: Raw edge dicts from the parser.
        """
        try:
            saved_count = 0
            skipped_count = 0

            # Batch-upsert edge rows with INSERT ... ON DUPLICATE KEY UPDATE.
            for edge in edges:
                # Only dict payloads can be cached.
                if not isinstance(edge, dict):
                    logger.warning(f"跳过无效边: {edge}")
                    skipped_count += 1
                    continue

                source_id = edge.get('source') or edge.get('from')
                target_id = edge.get('target') or edge.get('to')

                # Skip edges without both endpoints (previously these were
                # written with placeholder 'unknown_*' ids).
                if not source_id or not target_id:
                    logger.warning(f"跳过缺少源或目标节点的边: {edge}")
                    skipped_count += 1
                    continue

                # Map the edge type string onto a RelationshipType.
                edge_type_str = edge.get('type', 'data_flow').upper()
                if edge_type_str in ('DATA_TRANSFORMATION', 'DATA_FLOW'):
                    relationship_type = RelationshipType.FLOWS_TO
                elif edge_type_str == 'DEPENDS_ON':
                    relationship_type = RelationshipType.DEPENDS_ON
                else:
                    relationship_type = RelationshipType.FLOWS_TO  # default: data flow

                edge_id = edge.get('id', f"{source_id}_{target_id}")

                # MySQL upsert: insert, or refresh the row on duplicate key.
                stmt = insert(LineageEdgeCache).values(
                    edge_id=edge_id,
                    source_node_id=source_id,
                    target_node_id=target_id,
                    relationship_type=relationship_type,
                    task_id=task_id,
                    edge_properties=edge,
                    created_at=get_shanghai_now(),
                    updated_at=get_shanghai_now()
                )

                stmt = stmt.on_duplicate_key_update(
                    source_node_id=stmt.inserted.source_node_id,
                    target_node_id=stmt.inserted.target_node_id,
                    relationship_type=stmt.inserted.relationship_type,
                    task_id=stmt.inserted.task_id,
                    edge_properties=stmt.inserted.edge_properties,
                    updated_at=stmt.inserted.updated_at
                )

                await db.execute(stmt)
                saved_count += 1

            logger.info(f"边缓存保存完成: 保存={saved_count}, 跳过={skipped_count}")

        except Exception as e:
            logger.error(f"保存边缓存失败: {str(e)}")
            raise

    async def _save_edge_cache_without_task_id(self, db: AsyncSession, edges: List[Dict]):
        """Save edge cache rows with task_id=NULL, skipping duplicates.

        Used for global and task-level caches where no FK to a parse task
        should be recorded. An explicit existence check on edge_id skips
        already-present edges before the upsert is issued.
        """
        try:
            saved_count = 0
            updated_count = 0
            skipped_count = 0

            logger.info(f"开始保存 {len(edges)} 条边缓存（无任务ID）...")

            # Batch-upsert edge rows with INSERT ... ON DUPLICATE KEY UPDATE.
            for edge in edges:
                # Only dict payloads can be cached.
                if not isinstance(edge, dict):
                    logger.warning(f"跳过无效边: {edge}")
                    skipped_count += 1
                    continue

                source_id = edge.get('source') or edge.get('from')
                target_id = edge.get('target') or edge.get('to')

                if not source_id or not target_id:
                    logger.warning(f"跳过无效边（缺少源或目标）: {edge}")
                    skipped_count += 1
                    continue

                # Map the edge type string onto a RelationshipType.
                edge_type_str = edge.get('type', 'data_flow').upper()
                if edge_type_str == 'DATA_TRANSFORMATION' or edge_type_str == 'DATA_FLOW':
                    relationship_type = RelationshipType.FLOWS_TO
                elif edge_type_str == 'DEPENDS_ON' or edge_type_str == 'TASK_DEPENDENCY':
                    relationship_type = RelationshipType.DEPENDS_ON
                else:
                    relationship_type = RelationshipType.FLOWS_TO  # default: data flow

                # Build a unique edge id including the type, to distinguish
                # edges that share endpoints but differ in relationship.
                edge_id = edge.get('id', f"{source_id}_{target_id}_{edge_type_str.lower()}")

                # Skip edges that already exist with this edge_id.
                existing_edge = await db.execute(
                    select(LineageEdgeCache).where(
                        LineageEdgeCache.edge_id == edge_id
                    )
                )

                if existing_edge.scalar_one_or_none():
                    logger.debug(f"边已存在，跳过: {edge_id}")
                    skipped_count += 1
                    continue

                # Upsert with task_id=NULL to avoid the FK constraint.
                stmt = insert(LineageEdgeCache).values(
                    edge_id=edge_id,
                    source_node_id=source_id,
                    target_node_id=target_id,
                    relationship_type=relationship_type,
                    task_id=None,  # NULL avoids the foreign-key constraint
                    edge_properties=edge,
                    created_at=get_shanghai_now(),
                    updated_at=get_shanghai_now()
                )

                # Refresh the row if the key already exists.
                stmt = stmt.on_duplicate_key_update(
                    source_node_id=stmt.inserted.source_node_id,
                    target_node_id=stmt.inserted.target_node_id,
                    relationship_type=stmt.inserted.relationship_type,
                    edge_properties=stmt.inserted.edge_properties,
                    updated_at=stmt.inserted.updated_at
                )

                result = await db.execute(stmt)
                # NOTE(review): since existing edge_ids are skipped above, the
                # duplicate-key branch should normally not fire here, so
                # updated_count likely stays 0 (rowcount 0 would mean a no-op
                # statement) — confirm against MySQL affected-rows semantics.
                if result.rowcount > 0:
                    saved_count += 1
                    logger.debug(f"保存边缓存: {source_id} -> {target_id} ({edge_type_str})")
                else:
                    updated_count += 1

            logger.info(f"边缓存保存完成: 新增={saved_count}, 更新={updated_count}, 跳过={skipped_count}")

        except Exception as e:
            logger.error(f"保存边缓存失败: {str(e)}")
            raise

    async def _save_edge_cache_with_validation(self, db: AsyncSession, edges: List[Dict], valid_node_ids: set):
        """Save edge cache rows (task_id=NULL), keeping only edges whose
        endpoints exist in the already-persisted node-id set.

        Args:
            db: Async session used for the upserts (no commit here).
            edges: Raw edge dicts from the parser.
            valid_node_ids: Node ids known to exist in the node cache.
        """
        try:
            saved_count = 0
            skipped_count = 0

            # Lookup table mapping the raw type string to a RelationshipType.
            type_map = {
                'DATA_TRANSFORMATION': RelationshipType.FLOWS_TO,
                'DATA_FLOW': RelationshipType.FLOWS_TO,
                'DEPENDS_ON': RelationshipType.DEPENDS_ON,
                'TASK_DEPENDENCY': RelationshipType.DEPENDS_ON,
            }

            for edge in edges:
                # Only dict payloads can be cached.
                if not isinstance(edge, dict):
                    logger.warning(f"跳过无效边: {edge}")
                    skipped_count += 1
                    continue

                source_id = edge.get('source') or edge.get('from')
                target_id = edge.get('target') or edge.get('to')

                # Guard clauses: both endpoints must be present and persisted.
                if not source_id or not target_id:
                    logger.warning(f"跳过缺少源或目标节点的边: {edge}")
                    skipped_count += 1
                    continue
                if source_id not in valid_node_ids:
                    logger.warning(f"跳过边：源节点 '{source_id}' 不存在于节点缓存中")
                    skipped_count += 1
                    continue
                if target_id not in valid_node_ids:
                    logger.warning(f"跳过边：目标节点 '{target_id}' 不存在于节点缓存中")
                    skipped_count += 1
                    continue

                # Unknown types fall back to FLOWS_TO (data flow).
                edge_type_str = edge.get('type', 'data_flow').upper()
                relationship_type = type_map.get(edge_type_str, RelationshipType.FLOWS_TO)

                edge_id = edge.get('id', f"{source_id}_{target_id}")

                # Upsert with task_id=NULL to avoid the FK constraint.
                row = dict(
                    edge_id=edge_id,
                    source_node_id=source_id,
                    target_node_id=target_id,
                    relationship_type=relationship_type,
                    task_id=None,  # NULL avoids the foreign-key constraint
                    edge_properties=edge,
                    created_at=get_shanghai_now(),
                    updated_at=get_shanghai_now()
                )
                upsert = insert(LineageEdgeCache).values(**row)
                upsert = upsert.on_duplicate_key_update(
                    source_node_id=upsert.inserted.source_node_id,
                    target_node_id=upsert.inserted.target_node_id,
                    relationship_type=upsert.inserted.relationship_type,
                    edge_properties=upsert.inserted.edge_properties,
                    updated_at=upsert.inserted.updated_at
                )

                await db.execute(upsert)
                saved_count += 1
                logger.debug(f"保存边缓存: {source_id} -> {target_id} ({edge_type_str})")

            logger.info(f"成功保存 {saved_count} 条边缓存，跳过 {skipped_count} 条无效边")

        except Exception as e:
            logger.error(f"保存边缓存失败: {str(e)}")
            raise

    async def _save_table_edge_cache_with_validation(self, db: AsyncSession, edges: List[Dict], valid_node_ids: set):
        """Save table-level edge cache rows, validating node existence.

        Each edge may carry a ``task_id`` holding the *Dinky* task id; it is
        resolved to the matching ``dp_parse_tasks.id`` (or NULL when no row
        matches). Lookups are memoized per call so the same SELECT is not
        issued once per edge for a shared task id.

        Args:
            db: Async session used for lookups and upserts (no commit here).
            edges: Raw edge dicts from the parser.
            valid_node_ids: Node ids known to exist in the node cache.
        """
        try:
            saved_count = 0
            skipped_count = 0
            # Memoize dinky_task_id -> dp_parse_tasks.id (perf: many edges of
            # one task share the same id, so query each id only once).
            task_id_cache: Dict[Any, Optional[int]] = {}

            logger.info(f"开始保存 {len(edges)} 条表级边缓存...")

            for edge in edges:
                # Only dict payloads can be cached.
                if not isinstance(edge, dict):
                    logger.warning(f"跳过无效边: {edge}")
                    skipped_count += 1
                    continue

                source_id = edge.get('source') or edge.get('from')
                target_id = edge.get('target') or edge.get('to')

                if not source_id or not target_id:
                    logger.warning(f"跳过缺少源或目标的边: {edge}")
                    skipped_count += 1
                    continue

                # Both endpoints must exist in the persisted node cache.
                if source_id not in valid_node_ids or target_id not in valid_node_ids:
                    logger.warning(f"跳过引用不存在节点的边: {source_id} -> {target_id}")
                    skipped_count += 1
                    continue

                # Map the edge type string onto a RelationshipType.
                edge_type_str = edge.get('type', 'data_transformation').upper()
                if edge_type_str in ('DATA_TRANSFORMATION', 'DATA_FLOW'):
                    relationship_type = RelationshipType.FLOWS_TO
                elif edge_type_str in ('DEPENDS_ON', 'TASK_DEPENDENCY'):
                    relationship_type = RelationshipType.DEPENDS_ON
                else:
                    relationship_type = RelationshipType.FLOWS_TO  # default: data flow

                edge_id = edge.get('id', f"{source_id}_{target_id}")

                # Resolve the Dinky task id to the dp_parse_tasks primary key,
                # using the per-call memo to avoid repeated SELECTs.
                dinky_task_id = edge.get('task_id')
                actual_task_id = None

                if dinky_task_id is not None:
                    if dinky_task_id in task_id_cache:
                        actual_task_id = task_id_cache[dinky_task_id]
                    else:
                        task_result = await db.execute(text("""
                            SELECT id FROM dp_parse_tasks WHERE dinky_task_id = :dinky_task_id LIMIT 1
                        """), {"dinky_task_id": dinky_task_id})

                        task_row = task_result.fetchone()
                        if task_row:
                            actual_task_id = task_row[0]
                        else:
                            logger.warning(f"未找到dinky_task_id={dinky_task_id}对应的dp_parse_tasks记录，将task_id设为NULL")
                            actual_task_id = None
                        task_id_cache[dinky_task_id] = actual_task_id

                # MySQL upsert: insert, or refresh the row on duplicate key.
                stmt = insert(LineageEdgeCache).values(
                    edge_id=edge_id,
                    source_node_id=source_id,
                    target_node_id=target_id,
                    relationship_type=relationship_type,
                    task_id=actual_task_id,  # resolved parse-task id or NULL
                    edge_properties=edge,
                    created_at=get_shanghai_now(),
                    updated_at=get_shanghai_now()
                )

                stmt = stmt.on_duplicate_key_update(
                    source_node_id=stmt.inserted.source_node_id,
                    target_node_id=stmt.inserted.target_node_id,
                    relationship_type=stmt.inserted.relationship_type,
                    task_id=stmt.inserted.task_id,
                    edge_properties=stmt.inserted.edge_properties,
                    updated_at=stmt.inserted.updated_at
                )

                await db.execute(stmt)
                saved_count += 1
                logger.debug(f"保存表级边缓存: {source_id} -> {target_id}")

            logger.info(f"表级边缓存保存完成: 保存={saved_count}, 跳过={skipped_count}")

        except Exception as e:
            logger.error(f"保存表级边缓存失败: {str(e)}")
            raise

    async def _save_task_edge_cache_with_validation(self, db: AsyncSession, edges: List[Dict], valid_node_ids: set):
        """Save task-level edge cache rows (task_id=NULL marks a task edge),
        validating that both endpoints exist in the node cache.

        Args:
            db: Async session used for the upserts (no commit here).
            edges: Raw task-level edge dicts from the parser.
            valid_node_ids: Node ids known to exist in the node cache.
        """
        try:
            saved_count = 0
            skipped_count = 0

            logger.info(f"开始保存 {len(edges)} 条任务级边缓存...")

            for edge in edges:
                # Only dict payloads can be cached.
                if not isinstance(edge, dict):
                    logger.warning(f"跳过无效边: {edge}")
                    skipped_count += 1
                    continue

                source_id = edge.get('source') or edge.get('from')
                target_id = edge.get('target') or edge.get('to')

                if not source_id or not target_id:
                    logger.warning(f"跳过缺少源或目标的边: {edge}")
                    skipped_count += 1
                    continue

                # Both endpoints must exist in the persisted node cache.
                if source_id not in valid_node_ids or target_id not in valid_node_ids:
                    logger.warning(f"跳过引用不存在节点的边: {source_id} -> {target_id}")
                    skipped_count += 1
                    continue

                # Task-level edges are always dependency relationships; the
                # previous if/else mapped both TASK_DEPENDENCY and every other
                # type string to DEPENDS_ON, so the branch was dead code.
                relationship_type = RelationshipType.DEPENDS_ON

                edge_id = edge.get('id', f"{source_id}_{target_id}")

                # Upsert with task_id=NULL, which marks a task-level edge.
                stmt = insert(LineageEdgeCache).values(
                    edge_id=edge_id,
                    source_node_id=source_id,
                    target_node_id=target_id,
                    relationship_type=relationship_type,
                    task_id=None,  # NULL marks a task-level edge
                    edge_properties=edge,
                    created_at=get_shanghai_now(),
                    updated_at=get_shanghai_now()
                )

                stmt = stmt.on_duplicate_key_update(
                    source_node_id=stmt.inserted.source_node_id,
                    target_node_id=stmt.inserted.target_node_id,
                    relationship_type=stmt.inserted.relationship_type,
                    edge_properties=stmt.inserted.edge_properties,
                    updated_at=stmt.inserted.updated_at
                )

                await db.execute(stmt)
                saved_count += 1
                logger.debug(f"保存任务级边缓存: {source_id} -> {target_id}")

            logger.info(f"任务级边缓存保存完成: 保存={saved_count}, 跳过={skipped_count}")

        except Exception as e:
            logger.error(f"保存任务级边缓存失败: {str(e)}")
            raise

    async def _save_to_neo4j(self, parse_results: Dict) -> Dict:
        """Write the lineage graph (nodes, edges, task-level items) to Neo4j.

        Builds one query per node/edge via the _build_*_query helpers and
        executes them in a single write transaction. Returns a result dict
        instead of raising, so callers can treat Neo4j as best-effort.
        """
        try:
            logger.info("保存数据到Neo4j...")
            
            # Skip gracefully when the Neo4j client reports no connection.
            if not self.neo4j_client.is_connected():
                logger.warning("Neo4j未连接，跳过Neo4j保存")
                return {
                    'success': False,
                    'message': 'Neo4j未连接',
                    'skipped': True
                }
            
            # Collect all Cypher queries before executing them in one batch.
            queries = []
            
            # 1. Create table-level nodes.
            nodes = parse_results.get('nodes', [])
            for node in nodes:
                node_query = self._build_node_query(node)
                if node_query:
                    queries.append(node_query)
            
            # 2. Create table-level relationships.
            edges = parse_results.get('edges', [])
            for edge in edges:
                edge_query = self._build_edge_query(edge)
                if edge_query:
                    queries.append(edge_query)
            
            # 3. Create task-level nodes and relationships.
            task_nodes = parse_results.get('task_level_nodes', [])
            for task_node in task_nodes:
                task_query = self._build_task_node_query(task_node)
                if task_query:
                    queries.append(task_query)
            
            task_edges = parse_results.get('task_level_edges', [])
            for task_edge in task_edges:
                task_edge_query = self._build_task_edge_query(task_edge)
                if task_edge_query:
                    queries.append(task_edge_query)
            
            # Execute the batched write.
            if queries:
                # NOTE(review): not awaited — presumably execute_write_transaction
                # is a synchronous client method; confirm against neo4j_client.
                result = self.neo4j_client.execute_write_transaction(queries)
                
                logger.info(f"Neo4j保存完成，执行了 {len(queries)} 个查询")
                
                # Strip non-JSON-serializable driver objects from the results.
                cleaned_results = []
                for result_item in result.get('results', []):
                    cleaned_result = {
                        'query': result_item.get('query', ''),
                        'summary': {}
                    }
                    # Convert driver SummaryCounters-like objects to plain dicts
                    # (attribute names assumed from the Neo4j driver — verify).
                    if 'summary' in result_item:
                        summary = result_item['summary']
                        if hasattr(summary, '_contains_updates'):
                            cleaned_result['summary'] = {
                                'contains_updates': getattr(summary, '_contains_updates', False),
                                'labels_added': getattr(summary, 'labels_added', 0),
                                'labels_removed': getattr(summary, 'labels_removed', 0),
                                'nodes_created': getattr(summary, 'nodes_created', 0),
                                'nodes_deleted': getattr(summary, 'nodes_deleted', 0),
                                'properties_set': getattr(summary, 'properties_set', 0),
                                'relationships_created': getattr(summary, 'relationships_created', 0),
                                'relationships_deleted': getattr(summary, 'relationships_deleted', 0)
                            }
                        else:
                            cleaned_result['summary'] = dict(summary) if summary else {}
                    cleaned_results.append(cleaned_result)

                return {
                    'success': result.get('success', False),
                    'queries_executed': len(queries),
                    'results': cleaned_results,
                    'error': result.get('error')
                }
            else:
                return {
                    'success': True,
                    'message': '没有数据需要保存到Neo4j',
                    'queries_executed': 0
                }
                
        except Exception as e:
            logger.error(f"Neo4j保存失败: {str(e)}")
            return {
                'success': False,
                'message': f'Neo4j保存失败: {str(e)}',
                'error': str(e)
            }
    
    def _build_node_query(self, node: Dict) -> Optional[Dict]:
        """Build a parameterized MERGE query for a lineage (table) node.

        Args:
            node: Node dict; must contain 'id' and 'label'; may contain
                'type', 'table_name', 'task_id', 'task_name', 'columns'.

        Returns:
            Dict with 'query' and 'parameters' keys, or None if the node
            dict is malformed (error is logged, never raised).
        """
        try:
            node_id = node['id']
            node_type = node.get('type', 'Table')
            node_label = node['label']

            # Use one timestamp so created_at and updated_at agree exactly.
            now_iso = get_shanghai_now().isoformat()
            properties = {
                'id': node_id,
                'name': node_label,
                'type': node_type,
                'table_name': node.get('table_name', node_label),
                'task_id': node.get('task_id'),
                'task_name': node.get('task_name'),
                'created_at': now_iso,
                'updated_at': now_iso
            }

            # Column metadata is stored as a JSON string (Neo4j properties
            # cannot hold nested maps).
            if node.get('columns'):
                properties['columns'] = json.dumps(node['columns'])

            # Node labels cannot be parameterized in Cypher, so the type is
            # interpolated into the query text. Restrict it to alphanumerics
            # and underscores to avoid Cypher injection or syntax errors
            # from unexpected type values; fall back to 'Table' if nothing
            # survives sanitization.
            safe_label = ''.join(
                ch for ch in node_type.capitalize() if ch.isalnum() or ch == '_'
            ) or 'Table'

            query = f"""
            MERGE (n:{safe_label} {{id: $id}})
            SET n += $properties
            """

            return {
                'query': query,
                'parameters': {
                    'id': node_id,
                    'properties': properties
                }
            }

        except Exception as e:
            logger.error(f"构建节点查询失败: {str(e)}")
            return None
    
    def _build_edge_query(self, edge: Dict) -> Optional[Dict]:
        """Build a parameterized MERGE query for a lineage relationship.

        Args:
            edge: Edge dict; must contain 'source' and 'target' node ids;
                may contain 'type', 'task_id', 'task_name'.

        Returns:
            Dict with 'query' and 'parameters' keys, or None if the edge
            dict is malformed (error is logged, never raised).
        """
        try:
            source_id = edge['source']
            target_id = edge['target']
            edge_type = edge.get('type', 'FLOWS_TO').upper()

            # Relationship types cannot be parameterized in Cypher, so the
            # type is interpolated into the query text. Restrict it to
            # alphanumerics and underscores to avoid Cypher injection or
            # syntax errors from unexpected values; fall back to the
            # default relationship type if nothing survives sanitization.
            safe_type = ''.join(
                ch for ch in edge_type if ch.isalnum() or ch == '_'
            ) or 'FLOWS_TO'

            properties = {
                'task_id': edge.get('task_id'),
                'task_name': edge.get('task_name'),
                'created_at': get_shanghai_now().isoformat()
            }

            query = f"""
            MATCH (source {{id: $source_id}})
            MATCH (target {{id: $target_id}})
            MERGE (source)-[r:{safe_type}]->(target)
            SET r += $properties
            """

            return {
                'query': query,
                'parameters': {
                    'source_id': source_id,
                    'target_id': target_id,
                    'properties': properties
                }
            }

        except Exception as e:
            logger.error(f"构建关系查询失败: {str(e)}")
            return None
    
    def _build_task_node_query(self, task_node: Dict) -> Optional[Dict]:
        """Build a parameterized MERGE query that upserts a Task node.

        Args:
            task_node: Task dict; must contain 'task_id' and 'task_name';
                may contain 'node_count' and 'edge_count'.

        Returns:
            Dict with 'query' and 'parameters' keys, or None if the task
            dict is malformed (error is logged, never raised).
        """
        try:
            dinky_task_id = task_node['task_id']
            graph_node_id = f"task_{dinky_task_id}"

            node_properties = {
                'id': graph_node_id,
                'task_id': dinky_task_id,
                'name': task_node['task_name'],
                'type': 'Task',
                'node_count': task_node.get('node_count', 0),
                'edge_count': task_node.get('edge_count', 0),
                'created_at': get_shanghai_now().isoformat(),
                'updated_at': get_shanghai_now().isoformat()
            }

            merge_query = """
            MERGE (t:Task {id: $id})
            SET t += $properties
            """

            return {
                'query': merge_query,
                'parameters': {
                    'id': graph_node_id,
                    'properties': node_properties
                }
            }

        except Exception as e:
            logger.error(f"构建任务节点查询失败: {str(e)}")
            return None
    
    def _build_task_edge_query(self, task_edge: Dict) -> Optional[Dict]:
        """Build a parameterized MERGE query for a task dependency edge.

        Args:
            task_edge: Edge dict; endpoint ids are accepted under several
                alias keys ('source_task_id'/'source'/'from' and
                'target_task_id'/'target'/'to'); may contain
                'relationship_type' and 'shared_tables'.

        Returns:
            Dict with 'query' and 'parameters' keys, or None if either
            endpoint is missing or the dict is malformed.
        """
        try:
            # Upstream producers use different key names for the endpoints;
            # take the first non-empty alias for each side.
            src = next(
                (task_edge[k] for k in ('source_task_id', 'source', 'from')
                 if task_edge.get(k)),
                None,
            )
            dst = next(
                (task_edge[k] for k in ('target_task_id', 'target', 'to')
                 if task_edge.get(k)),
                None,
            )

            if not src or not dst:
                logger.warning(f"任务边缺少必要字段: {task_edge}")
                return None

            rel_properties = {
                'relationship_type': task_edge.get('relationship_type', 'DEPENDS_ON'),
                'shared_tables': task_edge.get('shared_tables', []),
                'created_at': get_shanghai_now().isoformat()
            }

            dependency_query = """
            MATCH (source:Task {id: $source_id})
            MATCH (target:Task {id: $target_id})
            MERGE (source)-[r:DEPENDS_ON]->(target)
            SET r += $properties
            """

            return {
                'query': dependency_query,
                'parameters': {
                    'source_id': f"task_{src}",
                    'target_id': f"task_{dst}",
                    'properties': rel_properties
                }
            }

        except Exception as e:
            logger.error(f"构建任务关系查询失败: {str(e)}")
            return None
    
    async def _record_sync_log(self, db: AsyncSession, parse_results: Dict, mysql_result: Dict, neo4j_result: Dict) -> Dict:
        """Write one LineageSyncLog row per parsed task and commit.

        Args:
            db: Async SQLAlchemy session.
            parse_results: Full parse payload; 'task_lineage_results' drives
                which tasks get a log row.
            mysql_result: Outcome dict of the MySQL save step.
            neo4j_result: Outcome dict of the Neo4j save step; its 'results'
                entries may carry non-serializable SummaryCounters objects.

        Returns:
            Dict with 'success' flag and a status 'message'. Never raises.
        """
        try:
            def _summary_to_dict(summary: Any) -> Dict:
                # SummaryCounters from the Neo4j driver is not JSON
                # serializable; flatten it to a plain dict of counters.
                if hasattr(summary, '_contains_updates'):
                    return {
                        'contains_updates': getattr(summary, '_contains_updates', False),
                        'labels_added': getattr(summary, 'labels_added', 0),
                        'labels_removed': getattr(summary, 'labels_removed', 0),
                        'nodes_created': getattr(summary, 'nodes_created', 0),
                        'nodes_deleted': getattr(summary, 'nodes_deleted', 0),
                        'properties_set': getattr(summary, 'properties_set', 0),
                        'relationships_created': getattr(summary, 'relationships_created', 0),
                        'relationships_deleted': getattr(summary, 'relationships_deleted', 0)
                    }
                return dict(summary) if summary else {}

            # The Neo4j operation list is the same for every task's log row,
            # so clean it once up front instead of once per task.
            neo4j_operations = []
            for result in neo4j_result.get('results', []):
                operation = {
                    'query': result.get('query', ''),
                    'summary': {}
                }
                if 'summary' in result:
                    operation['summary'] = _summary_to_dict(result['summary'])
                neo4j_operations.append(operation)

            # Overall status and execution time are also task-independent.
            overall_status = (
                SyncStatus.SUCCESS
                if mysql_result.get('success') and neo4j_result.get('success')
                else SyncStatus.FAILED
            )
            execution_ms = int(
                parse_results.get('statistics', {}).get('processing_time', 0) * 1000
            )

            for task_result in parse_results.get('task_lineage_results', []):
                # Resolve the ParseTask row for this dinky task id; skip
                # tasks that have no persisted ParseTask to attach to.
                existing_task = await db.execute(
                    select(ParseTask).where(ParseTask.dinky_task_id == task_result['task_id'])
                )
                task = existing_task.scalar_one_or_none()
                if task is None:
                    continue

                db.add(LineageSyncLog(
                    task_id=task.id,
                    sync_type=SyncType.FULL,
                    operation_type=OperationType.MERGE,
                    affected_nodes=task_result.get('node_count', 0),
                    affected_edges=task_result.get('edge_count', 0),
                    sync_status=overall_status,
                    neo4j_operations=neo4j_operations,
                    execution_time_ms=execution_ms,
                    sync_data_hash=self._calculate_data_hash(task_result),
                    created_at=get_shanghai_now()
                ))

            await db.commit()

            return {
                'success': True,
                'message': '同步日志记录完成'
            }

        except Exception as e:
            logger.error(f"记录同步日志失败: {str(e)}")
            # Bug fix: roll back partially-added rows so the session stays
            # usable (mirrors force_rebuild_cache's error handling; the
            # session was previously left dirty on failure).
            await db.rollback()
            return {
                'success': False,
                'message': f'同步日志记录失败: {str(e)}'
            }
    
    def _calculate_data_hash(self, data: Dict) -> str:
        """计算数据哈希值"""
        try:
            data_str = json.dumps(data, sort_keys=True)
            return hashlib.md5(data_str.encode()).hexdigest()
        except Exception:
            return ""

    async def verify_cache_integrity(self, db: AsyncSession) -> Dict:
        """Report row counts of the lineage cache tables and the latest task.

        Args:
            db: Async SQLAlchemy session.

        Returns:
            Dict with 'success' and, on success, a 'data' report containing
            node/edge counts, latest successful task info, an emptiness flag
            and the verification timestamp. Never raises.
        """
        try:
            logger.info("开始验证缓存数据完整性...")

            # Count rows with SQL COUNT() rather than fetching all rows.
            node_total = (
                await db.execute(select(func.count(LineageNodeCache.id)))
            ).scalar()
            edge_total = (
                await db.execute(select(func.count(LineageEdgeCache.id)))
            ).scalar()

            # Most recently updated successful parse task, if any exists.
            newest = (
                await db.execute(
                    select(ParseTask)
                    .where(ParseTask.parse_status == ParseStatus.SUCCESS)
                    .order_by(ParseTask.updated_at.desc())
                    .limit(1)
                )
            ).scalar_one_or_none()

            is_empty = node_total == 0 and edge_total == 0
            report = {
                'cache_nodes_count': node_total,
                'cache_edges_count': edge_total,
                'latest_task_id': newest.id if newest else None,
                'latest_task_name': newest.task_name if newest else None,
                'cache_empty': is_empty,
                'verification_time': get_shanghai_now().isoformat()
            }

            if is_empty:
                logger.warning("⚠️ 缓存表为空！可能存在数据保存问题")
            else:
                logger.info(f"✅ 缓存验证完成: {node_total} 个节点, {edge_total} 条边")

            return {
                'success': True,
                'data': report
            }

        except Exception as e:
            logger.error(f"缓存完整性验证失败: {str(e)}")
            return {
                'success': False,
                'error': str(e)
            }

    async def force_rebuild_cache(self, db: AsyncSession) -> Dict:
        """Rebuild the lineage node/edge cache tables from stored parse results.

        Clears the existing cache, then replays every successful ParseTask's
        persisted parse_result back into the cache tables via the
        cache-save helpers.

        Args:
            db: Async SQLAlchemy session used for all reads and writes.

        Returns:
            Dict with 'success', a human-readable 'message', and (on
            success) 'nodes_count'/'edges_count' totals. Never raises.
        """
        try:
            logger.info("开始强制重建缓存数据...")

            # Fetch all successfully parsed tasks, newest first.
            tasks_result = await db.execute(
                select(ParseTask).where(ParseTask.parse_status == ParseStatus.SUCCESS)
                .order_by(ParseTask.updated_at.desc())
            )
            tasks = tasks_result.scalars().all()

            if not tasks:
                return {
                    'success': False,
                    'message': '没有找到成功的解析任务'
                }

            # Drop the current cache and commit immediately so the rebuild
            # starts from a clean slate.
            await self._clear_old_cache(db)
            await db.commit()

            # Rebuild the cache from each task's stored parse result.
            total_nodes = 0
            total_edges = 0

            for task in tasks:
                if task.parse_result:
                    try:
                        parse_result = task.parse_result

                        # Re-save node cache rows for this task.
                        nodes = parse_result.get('nodes', [])
                        if nodes:
                            await self._save_node_cache(db, task.dinky_task_id, nodes)
                            total_nodes += len(nodes)

                        # Re-save edge cache rows (saved without a task id).
                        edges = parse_result.get('edges', [])
                        if edges:
                            await self._save_edge_cache_without_task_id(db, edges)
                            total_edges += len(edges)

                    except Exception as task_error:
                        # One bad task must not abort the whole rebuild.
                        logger.error(f"重建任务 {task.id} 缓存失败: {str(task_error)}")

            await db.commit()

            logger.info(f"缓存重建完成: {total_nodes} 个节点, {total_edges} 条边")

            return {
                'success': True,
                'message': f'缓存重建完成: {total_nodes} 个节点, {total_edges} 条边',
                'nodes_count': total_nodes,
                'edges_count': total_edges
            }

        except Exception as e:
            logger.error(f"强制重建缓存失败: {str(e)}")
            # Roll back any partial writes so the session remains usable.
            await db.rollback()
            return {
                'success': False,
                'message': f'缓存重建失败: {str(e)}',
                'error': str(e)
            }


# Module-level singleton storage-service instance shared by importers.
lineage_storage_service = LineageStorageService()
