"""
高级FlinkSQL解析服务
使用SQLGlot和多种解析策略来完美解析FlinkSQL语句，提取表依赖关系和数据流向
"""

import re
import sqlparse
import sqlglot
from sqlglot import exp, parse_one, transpile
from sqlglot.errors import ParseError as SQLGlotParseError
from sqlparse.sql import Statement, IdentifierList, Identifier, Function
from sqlparse.tokens import Keyword, DML
from typing import List, Dict, Set, Optional, Tuple, Any
from app.core.logger import logger


class SQLParserService:
    """Advanced SQL parsing service.

    Extracts table dependencies and data-flow information from FlinkSQL
    statements using a mix of regex, sqlparse and SQLGlot strategies.
    """

    def __init__(self):
        """Create a parser with empty result containers."""
        # Tables read from (FROM / JOIN clauses).
        self.source_tables: Set[str] = set()
        # Tables written to (INSERT INTO / CREATE TABLE).
        self.target_tables: Set[str] = set()
        # CTE names and TEMPORARY tables.
        self.temp_tables: Set[str] = set()
        # Created views.
        self.views: Set[str] = set()
        self.connectors: Set[str] = set()  # FlinkSQL connector identifiers
        self.data_flows: List[Tuple[str, str, str]] = []  # (source, target, operation)
        
    def parse_flink_sql(self, sql_statement: str) -> Dict[str, List[str]]:
        """
        解析FlinkSQL语句，提取表依赖关系

        Args:
            sql_statement: FlinkSQL语句

        Returns:
            包含源表、目标表、临时表、视图的字典
        """
        try:
            # 清空之前的结果
            self._reset()

            # 预处理SQL语句
            cleaned_sql = self._preprocess_sql(sql_statement)

            # 使用正则表达式解析（更适合FlinkSQL）
            self._parse_with_regex(cleaned_sql)

            # 如果正则表达式没有解析到结果，尝试sqlparse
            if not any([self.source_tables, self.target_tables, self.temp_tables, self.views]):
                parsed = sqlparse.parse(cleaned_sql)
                for statement in parsed:
                    if statement.get_type() != 'UNKNOWN':
                        self._analyze_statement(statement)

            return {
                'source_tables': list(self.source_tables),
                'target_tables': list(self.target_tables),
                'temp_tables': list(self.temp_tables),
                'views': list(self.views)
            }

        except Exception as e:
            logger.error(f"SQL解析失败: {str(e)}")
            return {
                'source_tables': [],
                'target_tables': [],
                'temp_tables': [],
                'views': []
            }
    

    
    def _preprocess_sql(self, sql: str) -> str:
        """预处理SQL语句"""
        # 移除注释
        sql = re.sub(r'--.*?\n', '\n', sql)
        sql = re.sub(r'/\*.*?\*/', '', sql, flags=re.DOTALL)
        
        # 标准化空白字符
        sql = re.sub(r'\s+', ' ', sql).strip()
        
        return sql

    def _parse_with_regex(self, sql: str):
        """使用正则表达式解析SQL（更适合FlinkSQL）"""
        # 转换为大写以便匹配
        sql_upper = sql.upper()

        # 1. 解析INSERT INTO语句
        insert_patterns = [
            r'INSERT\s+INTO\s+([`\w\.\-]+)',
            r'INSERT\s+OVERWRITE\s+([`\w\.\-]+)',
        ]

        for pattern in insert_patterns:
            matches = re.findall(pattern, sql_upper)
            for match in matches:
                table_name = self._clean_table_name(match)
                if table_name:
                    self.target_tables.add(table_name.lower())

        # 2. 解析FROM子句
        from_patterns = [
            r'FROM\s+([`\w\.\-]+)',
            r'FROM\s+\(\s*SELECT.*?\)\s+AS\s+([`\w\.\-]+)',  # 子查询别名
        ]

        for pattern in from_patterns:
            matches = re.findall(pattern, sql_upper, re.DOTALL)
            for match in matches:
                table_name = self._clean_table_name(match)
                if table_name and not self._is_keyword(table_name):
                    self.source_tables.add(table_name.lower())

        # 3. 解析JOIN语句
        join_patterns = [
            r'(?:INNER\s+|LEFT\s+|RIGHT\s+|FULL\s+)?JOIN\s+([`\w\.\-]+)',
        ]

        for pattern in join_patterns:
            matches = re.findall(pattern, sql_upper)
            for match in matches:
                table_name = self._clean_table_name(match)
                if table_name and not self._is_keyword(table_name):
                    self.source_tables.add(table_name.lower())

        # 4. 解析CREATE TABLE语句
        create_table_patterns = [
            r'CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?([`\w\.\-]+)',
            r'CREATE\s+TEMPORARY\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?([`\w\.\-]+)',
        ]

        for pattern in create_table_patterns:
            matches = re.findall(pattern, sql_upper)
            for match in matches:
                table_name = self._clean_table_name(match)
                if table_name:
                    if 'TEMPORARY' in pattern:
                        self.temp_tables.add(table_name.lower())
                    else:
                        self.target_tables.add(table_name.lower())

        # 5. 解析CREATE VIEW语句
        create_view_patterns = [
            r'CREATE\s+VIEW\s+(?:IF\s+NOT\s+EXISTS\s+)?([`\w\.\-]+)',
            r'CREATE\s+TEMPORARY\s+VIEW\s+(?:IF\s+NOT\s+EXISTS\s+)?([`\w\.\-]+)',
        ]

        for pattern in create_view_patterns:
            matches = re.findall(pattern, sql_upper)
            for match in matches:
                view_name = self._clean_table_name(match)
                if view_name:
                    self.views.add(view_name.lower())

        # 6. 解析WITH子句（CTE）
        with_pattern = r'WITH\s+([`\w\.\-]+)\s+AS\s*\('
        matches = re.findall(with_pattern, sql_upper)
        for match in matches:
            temp_name = self._clean_table_name(match)
            if temp_name:
                self.temp_tables.add(temp_name.lower())

    def _analyze_statement(self, statement: Statement):
        """分析单个SQL语句"""
        sql_type = self._get_statement_type(statement)
        
        if sql_type == 'SELECT':
            self._parse_select_statement(statement)
        elif sql_type == 'INSERT':
            self._parse_insert_statement(statement)
        elif sql_type == 'CREATE':
            self._parse_create_statement(statement)
        elif sql_type == 'WITH':
            self._parse_with_statement(statement)
    
    def _get_statement_type(self, statement: Statement) -> str:
        """获取语句类型"""
        first_token = statement.token_first(skip_ws=True, skip_cm=True)
        if first_token and first_token.ttype is Keyword:
            return first_token.value.upper()
        return 'UNKNOWN'
    
    def _parse_select_statement(self, statement: Statement):
        """解析SELECT语句"""
        from_seen = False
        
        for token in statement.flatten():
            if token.ttype is Keyword and token.value.upper() == 'FROM':
                from_seen = True
                continue
            
            if from_seen and token.ttype is None and not token.is_whitespace:
                table_name = self._clean_table_name(token.value)
                if table_name and not self._is_keyword(table_name):
                    self.source_tables.add(table_name)
                    from_seen = False
    
    def _parse_insert_statement(self, statement: Statement):
        """解析INSERT语句"""
        into_seen = False
        select_seen = False
        
        for token in statement.flatten():
            if token.ttype is Keyword:
                keyword = token.value.upper()
                if keyword == 'INTO':
                    into_seen = True
                elif keyword == 'SELECT':
                    select_seen = True
                    into_seen = False
                elif keyword == 'FROM' and select_seen:
                    # 处理FROM子句中的源表
                    continue
            
            if into_seen and token.ttype is None and not token.is_whitespace:
                table_name = self._clean_table_name(token.value)
                if table_name and not self._is_keyword(table_name):
                    self.target_tables.add(table_name)
                    into_seen = False
            
            if select_seen and token.ttype is None and not token.is_whitespace:
                # 这里需要更复杂的逻辑来处理FROM子句
                pass
    
    def _parse_create_statement(self, statement: Statement):
        """解析CREATE语句"""
        statement_str = str(statement).upper()
        
        if 'CREATE TABLE' in statement_str:
            self._parse_create_table(statement)
        elif 'CREATE VIEW' in statement_str:
            self._parse_create_view(statement)
        elif 'CREATE TEMPORARY' in statement_str:
            self._parse_create_temp_table(statement)
    
    def _parse_create_table(self, statement: Statement):
        """解析CREATE TABLE语句"""
        table_seen = False
        
        for token in statement.flatten():
            if token.ttype is Keyword and token.value.upper() == 'TABLE':
                table_seen = True
                continue
            
            if table_seen and token.ttype is None and not token.is_whitespace:
                table_name = self._clean_table_name(token.value)
                if table_name and not self._is_keyword(table_name):
                    self.target_tables.add(table_name)
                    break
    
    def _parse_create_view(self, statement: Statement):
        """解析CREATE VIEW语句"""
        view_seen = False
        
        for token in statement.flatten():
            if token.ttype is Keyword and token.value.upper() == 'VIEW':
                view_seen = True
                continue
            
            if view_seen and token.ttype is None and not token.is_whitespace:
                view_name = self._clean_table_name(token.value)
                if view_name and not self._is_keyword(view_name):
                    self.views.add(view_name)
                    break
    
    def _parse_create_temp_table(self, statement: Statement):
        """解析CREATE TEMPORARY TABLE语句"""
        temp_table_pattern = r'CREATE\s+TEMPORARY\s+TABLE\s+(\w+(?:\.\w+)?)'
        match = re.search(temp_table_pattern, str(statement), re.IGNORECASE)
        if match:
            self.temp_tables.add(match.group(1))
    
    def _parse_with_statement(self, statement: Statement):
        """解析WITH语句（CTE）"""
        # WITH语句通常创建临时表
        with_pattern = r'WITH\s+(\w+)\s+AS'
        matches = re.findall(with_pattern, str(statement), re.IGNORECASE)
        for match in matches:
            self.temp_tables.add(match)
    
    def _clean_table_name(self, name: str) -> str:
        """清理表名"""
        if not name:
            return ""

        # 移除引号和括号
        name = name.strip('`"\'()[]')
        # 移除末尾的逗号和分号
        name = name.rstrip(',;')
        # 移除空白字符
        name = name.strip()

        # 如果包含点号，可能是 database.table 格式
        if '.' in name:
            parts = name.split('.')
            # 返回表名部分（最后一部分）
            name = parts[-1] if parts else name

        # 过滤掉明显不是表名的内容
        if not name or len(name) < 1 or name.isdigit():
            return ""

        return name
    
    def _is_keyword(self, word: str) -> bool:
        """检查是否为SQL关键字"""
        keywords = {
            'SELECT', 'FROM', 'WHERE', 'JOIN', 'INNER', 'LEFT', 'RIGHT', 'FULL',
            'ON', 'AS', 'AND', 'OR', 'NOT', 'IN', 'EXISTS', 'BETWEEN', 'LIKE',
            'ORDER', 'BY', 'GROUP', 'HAVING', 'LIMIT', 'OFFSET', 'UNION',
            'INSERT', 'INTO', 'VALUES', 'UPDATE', 'SET', 'DELETE', 'CREATE',
            'TABLE', 'VIEW', 'INDEX', 'DROP', 'ALTER', 'ADD', 'COLUMN',
            'PRIMARY', 'KEY', 'FOREIGN', 'REFERENCES', 'CONSTRAINT', 'NULL',
            'DEFAULT', 'AUTO_INCREMENT', 'UNIQUE', 'CHECK'
        }
        return word.upper() in keywords
    
    def extract_table_dependencies(self, sql_statement: str) -> List[Tuple[str, str]]:
        """
        提取表依赖关系
        
        Returns:
            List of (source_table, target_table) tuples
        """
        result = self.parse_flink_sql(sql_statement)
        dependencies = []
        
        # 从源表到目标表的依赖
        for target in result['target_tables']:
            for source in result['source_tables']:
                dependencies.append((source, target))
        
        # 从源表到视图的依赖
        for view in result['views']:
            for source in result['source_tables']:
                dependencies.append((source, view))
        
        return dependencies

    def parse_flink_sql_advanced(self, sql_statement: str) -> Dict[str, Any]:
        """
        使用SQLGlot和多种策略高级解析FlinkSQL语句

        Args:
            sql_statement: FlinkSQL语句

        Returns:
            包含详细解析结果的字典
        """
        try:
            # 清空之前的结果
            self._reset()

            # 预处理SQL语句
            cleaned_sql = self._preprocess_sql(sql_statement)

            # 策略1: 使用SQLGlot解析（支持多种SQL方言）
            sqlglot_result = self._parse_with_sqlglot(cleaned_sql)

            # 策略2: FlinkSQL特有语法解析
            flink_result = self._parse_flink_specific(cleaned_sql)

            # 策略3: 传统正则表达式解析（作为补充）
            regex_result = self._parse_with_regex_enhanced(cleaned_sql)

            # 合并结果
            self._merge_results(sqlglot_result, flink_result, regex_result)

            # 分析数据流向
            self._analyze_data_flows()

            return {
                'source_tables': list(self.source_tables),
                'target_tables': list(self.target_tables),
                'temp_tables': list(self.temp_tables),
                'views': list(self.views),
                'connectors': list(self.connectors),
                'data_flows': self.data_flows,
                'parsing_strategies': {
                    'sqlglot': sqlglot_result,
                    'flink_specific': flink_result,
                    'regex': regex_result
                }
            }

        except Exception as e:
            logger.error(f"高级SQL解析失败: {str(e)}")
            # 回退到原始解析方法
            return self.parse_flink_sql(sql_statement)

    def _parse_with_sqlglot(self, sql: str) -> Dict[str, Any]:
        """使用SQLGlot解析SQL"""
        result = {
            'success': False,
            'tables': set(),
            'sources': set(),
            'targets': set(),
            'error': None
        }

        try:
            # 尝试多种SQL方言解析
            dialects = ['', 'spark', 'hive', 'postgres', 'mysql', 'presto']

            for dialect in dialects:
                try:
                    # 解析SQL语句
                    if dialect:
                        parsed = parse_one(sql, dialect=dialect)
                    else:
                        parsed = parse_one(sql)

                    if parsed:
                        # 提取表信息
                        tables = self._extract_tables_from_sqlglot(parsed)
                        if tables:
                            result['tables'].update(tables)
                            result['success'] = True
                            logger.info(f"SQLGlot成功解析 (方言: {dialect or 'default'})")
                            break

                except SQLGlotParseError as e:
                    logger.debug(f"SQLGlot解析失败 (方言: {dialect or 'default'}): {str(e)}")
                    continue
                except Exception as e:
                    logger.debug(f"SQLGlot解析异常 (方言: {dialect or 'default'}): {str(e)}")
                    continue

        except Exception as e:
            result['error'] = str(e)
            logger.error(f"SQLGlot解析出错: {str(e)}")

        return result

    def _extract_tables_from_sqlglot(self, parsed_sql: exp.Expression) -> Set[str]:
        """Extract table names from a SQLGlot AST.

        Side effects: each discovered name is also filed into the
        instance's ``source_tables`` / ``target_tables`` / ``views`` /
        ``temp_tables`` sets according to its syntactic context.

        Args:
            parsed_sql: root expression returned by ``sqlglot.parse_one``.

        Returns:
            The set of all (lower-cased) table/CTE names found; empty on
            extraction errors.
        """
        tables = set()

        try:
            # All table references anywhere in the tree.
            for table in parsed_sql.find_all(exp.Table):
                if table.name:
                    table_name = table.name.lower()
                    tables.add(table_name)

                    # Classify by the immediate parent node: FROM/JOIN
                    # children are reads, INTO children are writes.
                    parent = table.parent
                    if isinstance(parent, exp.From) or isinstance(parent, exp.Join):
                        self.source_tables.add(table_name)
                    elif isinstance(parent, exp.Into):
                        self.target_tables.add(table_name)

            # INSERT targets (`insert.this` is the target when it has a name).
            for insert in parsed_sql.find_all(exp.Insert):
                if insert.this and hasattr(insert.this, 'name'):
                    target_table = insert.this.name.lower()
                    self.target_tables.add(target_table)
                    tables.add(target_table)

            # CREATE TABLE / CREATE VIEW objects.
            # NOTE(review): assumes `create.kind` is an upper-case string on
            # this sqlglot version -- confirm if sqlglot is upgraded.
            for create in parsed_sql.find_all(exp.Create):
                if create.this and hasattr(create.this, 'name'):
                    created_table = create.this.name.lower()
                    if create.kind == 'TABLE':
                        self.target_tables.add(created_table)
                    elif create.kind == 'VIEW':
                        self.views.add(created_table)
                    tables.add(created_table)

            # WITH-clause CTEs count as temporary tables.
            for with_clause in parsed_sql.find_all(exp.With):
                for cte in with_clause.expressions:
                    if hasattr(cte, 'alias') and cte.alias:
                        cte_name = cte.alias.lower()
                        self.temp_tables.add(cte_name)
                        tables.add(cte_name)

        except Exception as e:
            logger.error(f"从SQLGlot AST提取表信息失败: {str(e)}")

        return tables

    def _parse_flink_specific(self, sql: str) -> Dict[str, Any]:
        """Parse FlinkSQL-specific syntax (catalogs, CDC sources, connector
        WITH-options) out of *sql*.

        Side effects: every identifier found is also recorded in
        ``self.connectors`` as ``"<type>:<identifier>"``, and CDC
        ``table-name`` values are added to ``self.source_tables``.

        Returns:
            Result dict with one set per connector category plus
            ``success`` / ``error`` bookkeeping keys.
        """
        result = {
            'success': False,
            'connectors': set(),
            'cdc_sources': set(),
            'kafka_topics': set(),
            'upsert_kafka_topics': set(),
            'sqlserver_cdc_tables': set(),
            'pulsar_topics': set(),
            'redis_keys': set(),
            'elasticsearch_indexes': set(),
            'hbase_tables': set(),
            'mongodb_collections': set(),
            'hdfs_paths': set(),
            's3_paths': set(),
            'catalogs': set(),
            'error': None
        }

        try:
            sql_upper = sql.upper()

            # 1. CREATE CATALOG statements.
            catalog_pattern = r'CREATE\s+CATALOG\s+([`\w\.\-]+)\s+WITH\s*\('
            catalog_matches = re.findall(catalog_pattern, sql_upper)
            for match in catalog_matches:
                catalog_name = self._clean_table_name(match)
                if catalog_name:
                    result['catalogs'].add(catalog_name.lower())
                    self.connectors.add(f"catalog:{catalog_name.lower()}")

            # 2. EXECUTE CDCSOURCE statements.
            cdc_pattern = r'EXECUTE\s+CDCSOURCE\s+([`\w\.\-]+)'
            cdc_matches = re.findall(cdc_pattern, sql_upper)
            for match in cdc_matches:
                cdc_name = self._clean_table_name(match)
                if cdc_name:
                    result['cdc_sources'].add(cdc_name.lower())
                    self.connectors.add(f"cdc:{cdc_name.lower()}")

            # 3. Connector configuration -- enhanced pass extracting concrete
            #    identifiers (helper defined elsewhere in this class).
            self._extract_connector_identifiers(sql, result)

            # Legacy connector-type extraction (kept for compatibility).
            connector_patterns = [
                r"'connector'\s*=\s*'([^']+)'",
                r'"connector"\s*=\s*"([^"]+)"'
            ]

            for pattern in connector_patterns:
                connector_matches = re.findall(pattern, sql, re.IGNORECASE)
                for match in connector_matches:
                    result['connectors'].add(match.lower())
                    self.connectors.add(f"connector:{match.lower()}")

            # 4. Identifiers of the various message queues / storage systems.
            # Kafka topics (plain and sink-side, both quote styles).
            kafka_topic_patterns = [
                r"'topic'\s*=\s*'([^']+)'",
                r'"topic"\s*=\s*"([^"]+)"',
                r"'sink\.topic'\s*=\s*'([^']+)'",
                r'"sink\.topic"\s*=\s*"([^"]+)"'
            ]

            # Upsert-Kafka topics (handled separately below).
            upsert_kafka_patterns = [
                r"'connector'\s*=\s*'upsert-kafka'.*?'topic'\s*=\s*'([^']+)'",
                r'"connector"\s*=\s*"upsert-kafka".*?"topic"\s*=\s*"([^"]+)"'
            ]

            # Pulsar topics.
            # NOTE(review): the first pattern duplicates the Kafka 'topic'
            # pattern, so Kafka topics also surface as pulsar entries; the
            # URL patterns have no capture group, so findall yields the
            # full matched string for them -- confirm whether intended.
            pulsar_topic_patterns = [
                r"'topic'\s*=\s*'([^']+)'",  # Pulsar uses 'topic' as well
                r"'service-url'\s*=\s*'pulsar://[^']*'",
                r"'admin-url'\s*=\s*'http://[^']*'"
            ]

            # Redis keys / streams.
            redis_patterns = [
                r"'redis-mode'\s*=\s*'([^']+)'",
                r"'key'\s*=\s*'([^']+)'",
                r"'stream'\s*=\s*'([^']+)'",
                r"'hash-name'\s*=\s*'([^']+)'"
            ]

            # Elasticsearch indexes.
            es_patterns = [
                r"'index'\s*=\s*'([^']+)'",
                r"'document-type'\s*=\s*'([^']+)'"
            ]

            # HBase tables.
            hbase_patterns = [
                r"'table-name'\s*=\s*'([^']+)'",
                r"'family'\s*=\s*'([^']+)'"
            ]

            # MongoDB collections.
            mongo_patterns = [
                r"'collection'\s*=\s*'([^']+)'",
                r"'database'\s*=\s*'([^']+)'"
            ]

            # HDFS paths.
            hdfs_patterns = [
                r"'path'\s*=\s*'hdfs://([^']+)'",
                r"'base-path'\s*=\s*'hdfs://([^']+)'"
            ]

            # S3 buckets / paths.
            s3_patterns = [
                r"'bucket'\s*=\s*'([^']+)'",
                r"'path'\s*=\s*'s3://([^']+)'"
            ]

            # SQL Server CDC properties.
            sqlserver_cdc_patterns = [
                r"'connector'\s*=\s*'sqlserver-cdc'.*?'database-name'\s*=\s*'([^']+)'",
                r"'connector'\s*=\s*'sqlserver-cdc'.*?'table-name'\s*=\s*'([^']+)'",
                r"'connector'\s*=\s*'sqlserver-cdc'.*?'schema-name'\s*=\s*'([^']+)'",
                r'"connector"\s*=\s*"sqlserver-cdc".*?"database-name"\s*=\s*"([^"]+)"',
                r'"connector"\s*=\s*"sqlserver-cdc".*?"table-name"\s*=\s*"([^"]+)"'
            ]

            # Plain Kafka topics -- skipped entirely when the statement uses
            # upsert-kafka anywhere (coarse check over the whole SQL text).
            for pattern in kafka_topic_patterns:
                topic_matches = re.findall(pattern, sql, re.IGNORECASE)
                for match in topic_matches:
                    if 'upsert-kafka' not in sql.lower():
                        result['kafka_topics'].add(match)
                        self.connectors.add(f"kafka_topic:{match}")

            # Upsert-Kafka topics.
            for pattern in upsert_kafka_patterns:
                topic_matches = re.findall(pattern, sql, re.IGNORECASE | re.DOTALL)
                for match in topic_matches:
                    if 'upsert_kafka_topics' not in result:  # defensive; key pre-seeded above
                        result['upsert_kafka_topics'] = set()
                    result['upsert_kafka_topics'].add(match)
                    self.connectors.add(f"upsert_kafka_topic:{match}")

            # SQL Server CDC identifiers.
            for pattern in sqlserver_cdc_patterns:
                matches = re.findall(pattern, sql, re.IGNORECASE | re.DOTALL)
                for match in matches:
                    if 'sqlserver_cdc_tables' not in result:  # defensive; key pre-seeded above
                        result['sqlserver_cdc_tables'] = set()
                    result['sqlserver_cdc_tables'].add(match)
                    self.connectors.add(f"sqlserver_cdc_table:{match}")

            # Remaining connector identifier categories.
            # NOTE(review): these singular keys ('pulsar_topic', ...) differ
            # from the plural keys pre-seeded in `result` and checked in
            # `connector_keys` below, so matches here never flip `success`
            # on their own -- confirm whether that is intended.
            connector_patterns = {
                'pulsar_topic': pulsar_topic_patterns,
                'redis_key': redis_patterns,
                'elasticsearch_index': es_patterns,
                'hbase_table': hbase_patterns,
                'mongodb_collection': mongo_patterns,
                'hdfs_path': hdfs_patterns,
                's3_path': s3_patterns
            }

            for connector_type, patterns in connector_patterns.items():
                for pattern in patterns:
                    matches = re.findall(pattern, sql, re.IGNORECASE)
                    for match in matches:
                        if connector_type not in result:
                            result[connector_type] = set()
                        result[connector_type].add(match)
                        self.connectors.add(f"{connector_type}:{match}")

            # 5. 'table-name' properties (used by CDC connectors).
            table_name_patterns = [
                r"'table-name'\s*=\s*'([^']+)'",
                r'"table-name"\s*=\s*"([^"]+)"'
            ]

            for pattern in table_name_patterns:
                table_matches = re.findall(pattern, sql, re.IGNORECASE)
                for match in table_matches:
                    # Handle regex-style table names such as 'tenant\.zz_task_detail'.
                    table_name = match.replace('\\', '').replace('.', '_')
                    self.source_tables.add(table_name.lower())

            # Mark success when any known connector category matched.
            connector_keys = ['connectors', 'cdc_sources', 'kafka_topics', 'upsert_kafka_topics',
                            'sqlserver_cdc_tables', 'pulsar_topics', 'redis_keys',
                            'elasticsearch_indexes', 'hbase_tables', 'mongodb_collections',
                            'hdfs_paths', 's3_paths', 'catalogs']

            if any(result.get(key) for key in connector_keys):
                result['success'] = True

        except Exception as e:
            result['error'] = str(e)
            logger.error(f"FlinkSQL特有语法解析失败: {str(e)}")

        return result

    def _merge_results(self, sqlglot_result: Dict, flink_result: Dict, regex_result: Dict):
        """合并不同解析策略的结果"""
        try:
            # 合并连接器信息
            if flink_result.get('connectors'):
                self.connectors.update(flink_result['connectors'])

            # 如果SQLGlot解析成功，优先使用其结果
            if sqlglot_result.get('success'):
                logger.info("使用SQLGlot解析结果")
            else:
                logger.info("SQLGlot解析失败，使用其他策略结果")

        except Exception as e:
            logger.error(f"合并解析结果失败: {str(e)}")

    def _analyze_data_flows(self):
        """分析数据流向 - 改进版，只分析同一SQL语句内的真实依赖关系"""
        try:
            self.data_flows.clear()

            # 注意：这里我们需要在单个SQL语句的上下文中分析依赖关系
            # 而不是简单地连接所有源表和目标表

            # 如果同时有源表和目标表，说明是数据转换流程
            if self.source_tables and self.target_tables:
                # 只有当源表和目标表在同一个SQL语句中出现时才建立连接
                for target in self.target_tables:
                    for source in self.source_tables:
                        if source != target:  # 避免自循环
                            self.data_flows.append((source, target, 'data_transformation'))

            # 临时表到目标表的流向
            for temp in self.temp_tables:
                for target in self.target_tables:
                    if temp != target:
                        self.data_flows.append((temp, target, 'temp_to_target'))

            # 源表到临时表的流向
            for source in self.source_tables:
                for temp in self.temp_tables:
                    if source != temp:
                        self.data_flows.append((source, temp, 'source_to_temp'))

            # 视图依赖关系
            for source in self.source_tables:
                for view in self.views:
                    if source != view:
                        self.data_flows.append((source, view, 'view_dependency'))

            logger.info(f"分析出 {len(self.data_flows)} 个数据流向")

        except Exception as e:
            logger.error(f"数据流向分析失败: {str(e)}")

    def _reset(self):
        """重置解析结果"""
        self.source_tables.clear()
        self.target_tables.clear()
        self.temp_tables.clear()
        self.views.clear()
        self.connectors.clear()
        self.data_flows.clear()

    def build_data_lineage_graph(self, sql_statements: List[str]) -> Dict[str, Any]:
        """Build a data-lineage graph from a list of FlinkSQL statements.

        Each statement is parsed in isolation (state is reset between
        statements) and its tables/flows are merged into one node/edge
        graph.  Relies on helpers defined elsewhere in this class:
        ``_is_valid_table_name`` and ``_filter_meaningful_edges``.

        Args:
            sql_statements: FlinkSQL statements, one per entry.

        Returns:
            Graph dict with 'nodes', 'edges', 'connectors',
            'sql_analysis' and 'statistics'; on failure, a graph with
            empty collections and the error under 'statistics'.
        """
        try:
            all_nodes = set()
            all_edges = []
            all_connectors = set()
            sql_analysis_results = []

            # Parse each SQL statement independently.
            for i, sql in enumerate(sql_statements):
                logger.info(f"解析第 {i+1}/{len(sql_statements)} 个SQL语句")

                # Reset state so statements don't leak into each other.
                self._reset()

                # Parse this single statement.
                result = self.parse_flink_sql_advanced(sql)

                # Record the per-statement analysis.
                sql_result = {
                    'sql_index': i,
                    'source_tables': result['source_tables'],
                    'target_tables': result['target_tables'],
                    'temp_tables': result['temp_tables'],
                    'views': result['views'],
                    'connectors': result['connectors'],
                    'data_flows': result['data_flows']
                }
                sql_analysis_results.append(sql_result)

                # Collect nodes (only when the statement yielded tables).
                if result['source_tables'] or result['target_tables']:
                    # Keep only names that pass the validity filter.
                    valid_sources = [t for t in result['source_tables'] if self._is_valid_table_name(t)]
                    valid_targets = [t for t in result['target_tables'] if self._is_valid_table_name(t)]
                    valid_temps = [t for t in result['temp_tables'] if self._is_valid_table_name(t)]
                    valid_views = [t for t in result['views'] if self._is_valid_table_name(t)]

                    all_nodes.update(valid_sources)
                    all_nodes.update(valid_targets)
                    all_nodes.update(valid_temps)
                    all_nodes.update(valid_views)

                # Collect connector identifiers.
                all_connectors.update(result['connectors'])

                # Collect edges (deduplicated on source/target pair, so the
                # first statement producing a pair determines its metadata).
                for source, target, flow_type in result['data_flows']:
                    # Skip pairs already recorded.
                    edge_exists = any(
                        edge['source'] == source and edge['target'] == target
                        for edge in all_edges
                    )
                    if not edge_exists:
                        all_edges.append({
                            'source': source,
                            'target': target,
                            'type': flow_type,
                            'sql_index': i,
                            'sql_type': self._classify_sql_type(sql)
                        })

            # Filter edges down to the meaningful subset.
            filtered_edges = self._filter_meaningful_edges(all_edges, sql_analysis_results)

            # Assemble the graph structure.
            graph = {
                'nodes': [
                    {
                        'id': node,
                        'label': node,
                        'type': self._get_node_type_enhanced(node, sql_analysis_results)
                    }
                    for node in all_nodes
                ],
                'edges': filtered_edges,
                'connectors': list(all_connectors),
                'sql_analysis': sql_analysis_results,
                'statistics': {
                    'total_nodes': len(all_nodes),
                    'total_edges': len(filtered_edges),
                    'total_connectors': len(all_connectors),
                    'sql_statements_processed': len(sql_statements),
                    'original_edges': len(all_edges),
                    'filtered_edges': len(filtered_edges)
                }
            }

            logger.info(f"构建数据血缘图完成: {len(all_nodes)} 个节点, {len(all_edges)} 条边")
            return graph

        except Exception as e:
            logger.error(f"构建数据血缘图失败: {str(e)}")
            return {
                'nodes': [],
                'edges': [],
                'connectors': [],
                'statistics': {'error': str(e)}
            }

    def _get_node_type(self, node: str, connectors: Set[str]) -> str:
        """确定节点类型"""
        node_lower = node.lower()

        # 检查是否为连接器
        for connector in connectors:
            if node_lower in connector.lower():
                if 'kafka' in connector.lower():
                    return 'kafka_topic'
                elif 'cdc' in connector.lower():
                    return 'cdc_source'
                elif 'catalog' in connector.lower():
                    return 'catalog'
                else:
                    return 'connector'

        # 检查是否为临时表
        if node_lower in [t.lower() for t in self.temp_tables]:
            return 'temp_table'

        # 检查是否为视图
        if node_lower in [v.lower() for v in self.views]:
            return 'view'

        # 检查是否为目标表
        if node_lower in [t.lower() for t in self.target_tables]:
            return 'target_table'

        # 默认为源表
        return 'source_table'

    def _classify_sql_type(self, sql: str) -> str:
        """分类SQL语句类型"""
        sql_upper = sql.upper().strip()

        if sql_upper.startswith('CREATE CATALOG'):
            return 'catalog_definition'
        elif sql_upper.startswith('EXECUTE CDCSOURCE'):
            return 'cdc_execution'
        elif sql_upper.startswith('CREATE TABLE'):
            return 'table_creation'
        elif sql_upper.startswith('CREATE VIEW'):
            return 'view_creation'
        elif sql_upper.startswith('INSERT INTO'):
            return 'data_insertion'
        elif sql_upper.startswith('SELECT'):
            return 'data_query'
        elif sql_upper.startswith('SET '):
            return 'configuration'
        else:
            return 'unknown'

    def _get_node_type_enhanced(self, node: str, sql_analysis_results: List[Dict]) -> str:
        """增强的节点类型判断"""
        node_lower = node.lower()

        # 统计这个节点在不同SQL中的角色
        as_source_count = 0
        as_target_count = 0
        as_temp_count = 0
        as_view_count = 0

        for sql_result in sql_analysis_results:
            if node_lower in [t.lower() for t in sql_result['source_tables']]:
                as_source_count += 1
            if node_lower in [t.lower() for t in sql_result['target_tables']]:
                as_target_count += 1
            if node_lower in [t.lower() for t in sql_result['temp_tables']]:
                as_temp_count += 1
            if node_lower in [t.lower() for t in sql_result['views']]:
                as_view_count += 1

        # 根据出现频率判断主要类型
        if as_view_count > 0:
            return 'view'
        elif as_temp_count > 0:
            return 'temp_table'
        elif as_target_count > as_source_count:
            return 'target_table'
        elif as_source_count > 0:
            return 'source_table'
        else:
            return 'unknown_table'

    def _filter_meaningful_edges(self, edges: List[Dict], sql_analysis_results: List[Dict]) -> List[Dict]:
        """Deduplicate edges and keep only those that represent real data flows."""
        try:
            # Group duplicate edges by their source->target signature.
            grouped: Dict[str, List[Dict]] = {}
            for edge in edges:
                key = f"{edge['source']}->{edge['target']}"
                grouped.setdefault(key, []).append(edge)

            # Keep one representative per signature, and only when it encodes
            # a meaningful transformation.
            meaningful_edges = [
                duplicates[0]
                for duplicates in grouped.values()
                if self._is_meaningful_data_flow(duplicates[0], sql_analysis_results)
            ]

            logger.info(f"边过滤: {len(edges)} -> {len(meaningful_edges)}")
            return meaningful_edges

        except Exception as e:
            logger.error(f"边过滤失败: {str(e)}")
            return edges

    def _is_meaningful_data_flow(self, edge: Dict, sql_analysis_results: List[Dict]) -> bool:
        """判断是否是有意义的数据流"""
        try:
            source = edge['source']
            target = edge['target']
            sql_index = edge.get('sql_index', -1)

            # 避免自循环
            if source == target:
                return False

            # 过滤掉无意义的表名
            if not self._is_valid_table_name(source) or not self._is_valid_table_name(target):
                return False

            # 检查对应的SQL语句类型
            if sql_index >= 0 and sql_index < len(sql_analysis_results):
                sql_result = sql_analysis_results[sql_index]

                # 只保留真实的数据转换SQL（INSERT INTO ... SELECT FROM）
                if (source in sql_result['source_tables'] and
                    target in sql_result['target_tables']):
                    return True

                # 保留临时表相关的流向
                if (source in sql_result['temp_tables'] or
                    target in sql_result['temp_tables']):
                    return True

            return False

        except Exception as e:
            logger.error(f"判断数据流意义失败: {str(e)}")
            return False  # 出错时不保留

    def _is_valid_table_name(self, table_name: str) -> bool:
        """判断是否是有效的表名"""
        if not table_name or len(table_name.strip()) == 0:
            return False

        # 过滤掉单字母或过短的名称
        if len(table_name) <= 2:
            return False

        # 过滤掉明显不是表名的字符串
        invalid_patterns = ['set ', 'create ', 'insert ', 'select ', 'from ', 'where ']
        table_lower = table_name.lower()
        for pattern in invalid_patterns:
            if pattern in table_lower:
                return False

        return True

    def _parse_with_regex_enhanced(self, sql: str) -> Dict[str, Any]:
        """Run the regex-based parser and report how many tables it found."""
        outcome: Dict[str, Any] = {'success': False, 'tables_found': 0, 'error': None}

        try:
            # Delegate to the base regex parser, which populates the
            # instance-level table sets as a side effect.
            self._parse_with_regex(sql)

            # Success means at least one table of any kind was discovered.
            discovered = sum(
                len(bucket)
                for bucket in (self.source_tables, self.target_tables, self.temp_tables, self.views)
            )
            outcome['tables_found'] = discovered
            if discovered:
                outcome['success'] = True

        except Exception as e:
            outcome['error'] = str(e)
            logger.error(f"增强正则表达式解析失败: {str(e)}")

        return outcome

    def build_task_level_lineage_graph(self, task_sql_map: Dict[int, str], task_name_map: Dict[int, str] = None) -> Dict[str, Any]:
        """
        Build a task-level lineage graph, linking tasks through the concrete
        identifiers of their connectors (Kafka topics, CDC tables, ...).

        Args:
            task_sql_map: mapping of task id -> that task's SQL script {task_id: sql_statement}
            task_name_map: optional mapping of task id -> display name

        Returns:
            Task-level lineage info: task nodes, connector-identifier-based
            task-to-task edges, per-task internal graphs, and statistics.
            On failure the same shape is returned with empty lists and an
            'error' entry under 'statistics'.
        """
        try:
            logger.info(f"开始构建基于连接器标识符的任务级别血缘图，共 {len(task_sql_map)} 个任务")

            # Per-task parse results, plus producer/consumer registries keyed
            # by connector identifier.
            task_analysis = {}
            connector_producers = {}  # connector_identifier -> task_id (producer)
            connector_consumers = {}  # connector_identifier -> [task_ids] (consumers)

            # Step 1: parse every task's SQL, extracting connector identifiers
            # and their producer/consumer roles.
            for task_id, sql_statement in task_sql_map.items():
                logger.info(f"解析任务 {task_id} 的连接器配置")

                # Reset parser state so results do not leak between tasks.
                self._reset()

                result = self.parse_flink_sql_advanced(sql_statement)
                result['sql'] = sql_statement
                task_analysis[task_id] = result

                # Record which connectors this task produces to / consumes from.
                self._extract_and_analyze_connectors(task_id, sql_statement, result,
                                                   connector_producers, connector_consumers)

            # Step 2: derive task-to-task edges from shared connector identifiers.
            task_edges = []

            logger.info(f"连接器生产者: {len(connector_producers)} 个")
            logger.info(f"连接器消费者: {len(connector_consumers)} 个")

            # A producer and a consumer of the same identifier are linked
            # (identifier-based matching is the core improvement here).
            for connector_identifier, producer_task in connector_producers.items():
                if connector_identifier in connector_consumers:
                    for consumer_task in connector_consumers[connector_identifier]:
                        if producer_task != consumer_task:
                            # Split the "type:identifier" key into its parts.
                            connector_type, actual_identifier = self._parse_connector_identifier(connector_identifier)

                            task_edges.append({
                                'id': f'task_edge_{producer_task}_{consumer_task}_{len(task_edges)}',
                                'source': f'task_{producer_task}',
                                'target': f'task_{consumer_task}',
                                'type': 'task_dependency',
                                'dependency_type': connector_type,
                                'connector_id': actual_identifier,
                                'connector_type': connector_type,
                                'common_tables': []
                            })
                            logger.info(f"建立连接器依赖: 任务{producer_task} -> 任务{consumer_task} "
                                      f"(连接器: {actual_identifier}, 类型: {connector_type})")

            # Step 3: build one node per task, enriched with connector info.
            task_nodes = []
            for task_id in task_sql_map.keys():
                analysis = task_analysis[task_id]
                task_name = task_name_map.get(task_id, f'Task {task_id}') if task_name_map else f'Task {task_id}'

                # Collect the connector identifiers this task touches.
                task_connectors = self._extract_task_connectors(task_id, analysis,
                                                              connector_producers, connector_consumers)

                task_nodes.append({
                    'id': f'task_{task_id}',
                    'task_id': task_id,
                    'task_name': task_name,
                    'label': task_name,
                    'type': 'task',
                    'node_count': len(analysis['source_tables']) + len(analysis['target_tables']),
                    'edge_count': len(analysis.get('data_flows', [])),
                    'source_tables': list(analysis['source_tables']),
                    'target_tables': list(analysis['target_tables']),
                    'connectors': task_connectors,  # connector identifier info extracted above
                    'internal_nodes': self._build_internal_nodes(analysis),
                    'internal_edges': self._build_internal_edges(analysis)
                })

            logger.info(f"基于连接器标识符的任务级别血缘图构建完成: {len(task_nodes)} 个任务节点, {len(task_edges)} 条任务连接")

            return {
                'task_level_nodes': task_nodes,
                'task_level_edges': task_edges,
                'task_lineage_results': [
                    {
                        'task_id': task_id,
                        'task_name': task_name_map.get(task_id, f'Task {task_id}') if task_name_map else f'Task {task_id}',
                        'nodes': self._build_internal_nodes(analysis),
                        'edges': self._build_internal_edges(analysis),
                        'node_count': len(analysis['source_tables']) + len(analysis['target_tables']),
                        'edge_count': len(analysis.get('data_flows', []))
                    }
                    for task_id, analysis in task_analysis.items()
                ],
                'statistics': {
                    'total_tasks': len(task_sql_map),
                    'total_task_connections': len(task_edges),
                    'connector_based_connections': len(task_edges),
                    'unique_connectors': len(set(connector_producers.keys()) | set(connector_consumers.keys()))
                }
            }

        except Exception as e:
            logger.error(f"构建任务级别血缘图失败: {str(e)}")
            return {
                'task_level_nodes': [],
                'task_level_edges': [],
                'task_lineage_results': [],
                'statistics': {'error': str(e)}
            }

    def _is_connector_producer(self, connector: Dict, sql_statement: str) -> bool:
        """判断连接器是否为生产者（数据输出）"""
        try:
            sql_upper = sql_statement.upper()
            connector_name = connector.get('name', '').lower()
            connector_type = connector.get('type', '').lower()

            # 如果SQL中包含INSERT INTO，且连接器在INSERT INTO子句中，则为生产者
            if 'INSERT INTO' in sql_upper:
                insert_pattern = r'INSERT\s+INTO\s+([^\s\(]+)'
                matches = re.findall(insert_pattern, sql_upper)
                for match in matches:
                    if connector_name in match.lower():
                        return True

            # 根据连接器类型判断
            if 'sink' in connector_type or 'output' in connector_type:
                return True
            elif 'source' in connector_type or 'input' in connector_type:
                return False

            # 默认情况下，如果在CREATE TABLE语句中定义，需要进一步分析
            return False

        except Exception as e:
            logger.warning(f"判断连接器生产者失败: {str(e)}")
            return False

    def _determine_connector_type(self, connector_id: str, task_analysis: Dict) -> str:
        """确定连接器类型"""
        connector_id_lower = connector_id.lower()

        # 根据名称模式判断
        if 'kafka' in connector_id_lower:
            if 'upsert' in connector_id_lower:
                return 'upsert_kafka_topic'
            else:
                return 'kafka_topic'
        elif 'cdc' in connector_id_lower or 'sqlserver' in connector_id_lower:
            return 'sqlserver_cdc_table'
        elif 'pulsar' in connector_id_lower:
            return 'pulsar_topic'
        elif 'redis' in connector_id_lower:
            return 'redis_key'
        elif 'elasticsearch' in connector_id_lower or 'es' in connector_id_lower:
            return 'elasticsearch_index'
        elif 'hbase' in connector_id_lower:
            return 'hbase_table'
        elif 'mongodb' in connector_id_lower or 'mongo' in connector_id_lower:
            return 'mongodb_collection'
        elif 'hdfs' in connector_id_lower:
            return 'hdfs_path'
        elif 's3' in connector_id_lower:
            return 's3_path'
        else:
            return 'shared_table'

    def _extract_table_schema(self, sql: str, table_name: str) -> Dict:
        """Pull column definitions and WITH options for one table from its CREATE TABLE statement."""
        schema_info: Dict[str, Any] = {
            'columns': [],
            'properties': {},
            'constraints': [],
            'with_options': {}
        }

        try:
            escaped_table = re.escape(table_name)
            # Several CREATE TABLE layouts are tried in order, with and
            # without a trailing WITH(...) clause.
            candidate_patterns = (
                rf'CREATE\s+TABLE\s+{escaped_table}\s*\((.*?)\)\s*(?:WITH|with)\s*\((.*?)\)',
                rf'CREATE\s+TABLE\s+{escaped_table}\s*\((.*?)\)\s*(?:WITH|with)\s*\n\s*\((.*?)\)',
                rf'CREATE\s+TABLE\s+{escaped_table}\s*\((.*?)\)(?:\s*WITH|\s*with|\s*$)',
                rf'create\s+table\s+{escaped_table}\s*\((.*?)\)\s*(?:with|WITH)\s*\((.*?)\)',
                rf'create\s+table\s+{escaped_table}\s*\((.*?)\)(?:\s*with|\s*WITH|\s*$)'
            )

            hit = None
            for candidate in candidate_patterns:
                hit = re.search(candidate, sql, re.IGNORECASE | re.DOTALL)
                if hit:
                    break

            if hit:
                columns_part = hit.group(1)
                captured = hit.groups()
                with_part = captured[1] if len(captured) > 1 and captured[1] else ""

                logger.info(f"找到表 {table_name} 的CREATE语句，字段部分长度: {len(columns_part)}")

                # Column list -> structured column dicts.
                if columns_part:
                    schema_info['columns'] = self._parse_column_definitions(columns_part)

                # WITH(...) body -> key/value options.
                if with_part:
                    schema_info['with_options'] = self._parse_with_options(with_part)
            else:
                logger.warning(f"未找到表 {table_name} 的CREATE TABLE语句")

        except Exception as e:
            logger.error(f"解析表 {table_name} 的字段信息失败: {str(e)}")

        return schema_info

    def _parse_column_definitions(self, columns_text: str) -> List[Dict]:
        """Parse the body of a CREATE TABLE column list into column dicts."""
        columns: List[Dict] = []

        try:
            logger.info(f"开始解析字段定义，原始文本长度: {len(columns_text)}")

            # Collapse all whitespace runs so the splitter sees one flat line.
            flattened = re.sub(r'\s+', ' ', columns_text.strip())

            # Split on top-level commas only (nested parentheses respected).
            column_defs = self._split_column_definitions(flattened)

            logger.info(f"分割出 {len(column_defs)} 个字段定义")

            for ordinal, raw_def in enumerate(column_defs, start=1):
                raw_def = raw_def.strip()
                if not raw_def:
                    continue

                # Table-level PRIMARY KEY constraints are not columns.
                if raw_def.upper().startswith('PRIMARY KEY'):
                    logger.info(f"跳过PRIMARY KEY约束: {raw_def}")
                    continue

                logger.info(f"解析字段 {ordinal}: {raw_def}")
                parsed = self._parse_single_column(raw_def)
                if parsed:
                    columns.append(parsed)
                    logger.info(f"成功解析字段: {parsed['name']} {parsed['type']}")
                else:
                    logger.warning(f"无法解析字段定义: {raw_def}")

        except Exception as e:
            logger.error(f"解析字段定义失败: {str(e)}")
            import traceback
            traceback.print_exc()

        return columns

    def _split_column_definitions(self, text: str) -> List[str]:
        """智能分割字段定义，处理嵌套括号"""
        definitions = []
        current_def = ""
        paren_count = 0

        for char in text:
            if char == '(':
                paren_count += 1
            elif char == ')':
                paren_count -= 1
            elif char == ',' and paren_count == 0:
                if current_def.strip():
                    definitions.append(current_def.strip())
                current_def = ""
                continue

            current_def += char

        if current_def.strip():
            definitions.append(current_def.strip())

        return definitions

    def _parse_single_column(self, col_def: str) -> Dict:
        """解析单个字段定义"""
        try:
            # 更智能的字段解析
            # 支持格式：字段名 数据类型(长度) [约束] [COMMENT '注释']
            # 例如：store_day date, store_type varchar(50), id BIGINT NOT NULL COMMENT 'ID'

            # 使用正则表达式匹配字段定义
            # 匹配：字段名 + 数据类型(可能包含长度) + 可选的约束和注释
            pattern = r'^(\w+)\s+(\w+(?:\([^)]+\))?)\s*(.*?)$'
            match = re.match(pattern, col_def.strip(), re.IGNORECASE)

            if not match:
                logger.warning(f"无法匹配字段定义格式: {col_def}")
                return None

            field_name = match.group(1).strip('`"\'')
            field_type = match.group(2).upper()
            remaining = match.group(3) if match.group(3) else ""

            column_info = {
                'name': field_name,
                'type': field_type,
                'nullable': True,
                'comment': '',
                'constraints': [],
                'properties': {}
            }

            # 解析剩余部分的约束和注释
            remaining_upper = remaining.upper()

            # 检查NOT NULL
            if 'NOT NULL' in remaining_upper:
                column_info['nullable'] = False
                column_info['constraints'].append('NOT NULL')

            # 检查PRIMARY KEY
            if 'PRIMARY KEY' in remaining_upper:
                column_info['constraints'].append('PRIMARY KEY')

            # 检查UNIQUE
            if 'UNIQUE' in remaining_upper:
                column_info['constraints'].append('UNIQUE')

            # 提取注释（支持中文注释）
            comment_patterns = [
                r"COMMENT\s+['\"]([^'\"]*)['\"]",
                r"COMMENT\s+'([^']*)'",
                r'COMMENT\s+"([^"]*)"'
            ]

            for pattern in comment_patterns:
                comment_match = re.search(pattern, remaining, re.IGNORECASE)
                if comment_match:
                    column_info['comment'] = comment_match.group(1)
                    break

            # 提取默认值
            default_match = re.search(r"DEFAULT\s+([^\s,]+)", remaining_upper)
            if default_match:
                column_info['default'] = default_match.group(1)

            return column_info

        except Exception as e:
            logger.error(f"解析字段定义失败: {col_def}, 错误: {str(e)}")
            return None

    def _parse_with_options(self, with_text: str) -> Dict:
        """Parse a WITH(...) clause body into a key/value options dict."""
        options: Dict[str, str] = {}

        try:
            logger.info(f"解析WITH选项，文本长度: {len(with_text)}")

            # Flatten whitespace so each option sits on one logical line.
            flattened = re.sub(r'\s+', ' ', with_text.strip())

            # Quote-aware split: commas inside quoted values do not separate
            # options.
            for raw_pair in self._split_with_options(flattened):
                raw_pair = raw_pair.strip()
                if not raw_pair:
                    continue

                # key = value, with optional quoting on either side.
                kv_match = re.match(r"['\"]?([^'\"=]+)['\"]?\s*=\s*['\"]?([^'\"]*)['\"]?", raw_pair)
                if kv_match:
                    key = kv_match.group(1).strip()
                    value = kv_match.group(2).strip()
                    options[key] = value
                    logger.info(f"解析WITH选项: {key} = {value}")

        except Exception as e:
            logger.error(f"解析WITH选项失败: {str(e)}")

        return options

    def _split_with_options(self, text: str) -> List[str]:
        """智能分割WITH选项，避免在引号内分割"""
        options = []
        current_option = ""
        in_quotes = False
        quote_char = None

        i = 0
        while i < len(text):
            char = text[i]

            if char in ['"', "'"] and (i == 0 or text[i-1] != '\\'):
                if not in_quotes:
                    in_quotes = True
                    quote_char = char
                elif char == quote_char:
                    in_quotes = False
                    quote_char = None
            elif char == ',' and not in_quotes:
                if current_option.strip():
                    options.append(current_option.strip())
                current_option = ""
                i += 1
                continue

            current_option += char
            i += 1

        if current_option.strip():
            options.append(current_option.strip())

        return options

    def _build_internal_nodes(self, analysis: Dict) -> List[Dict]:
        """Materialize the table/view nodes used inside a single task's sub-graph."""
        nodes: List[Dict] = []
        sql = analysis.get('sql', '')

        # Table-like entries share one node shape and differ only in
        # type label and color.
        table_specs = (
            ('source_tables', 'source_table', '#52c41a'),
            ('target_tables', 'target_table', '#1890ff'),
            ('temp_tables', 'temp_table', '#faad14'),
        )

        counter = 0
        for key, node_type, color in table_specs:
            for table in analysis[key]:
                schema_info = self._extract_table_schema(sql, table)
                nodes.append({
                    'id': f'node_{counter}',
                    'label': table,
                    'type': node_type,
                    'color': color,
                    'table_name': table,
                    'columns': schema_info['columns'],
                    'properties': schema_info['properties'],
                    'with_options': schema_info['with_options']
                })
                counter += 1

        # Views carry no schema payload, only identity and styling.
        for view in analysis['views']:
            nodes.append({
                'id': f'node_{counter}',
                'label': view,
                'type': 'view',
                'color': '#722ed1'
            })
            counter += 1

        return nodes

    def _build_internal_edges(self, analysis: Dict) -> List[Dict]:
        """构建任务内部边"""
        edges = []
        edge_id_counter = 0

        # 创建节点名称到ID的映射
        node_map = {}
        for i, table in enumerate(analysis['source_tables']):
            node_map[table] = f'node_{i}'

        offset = len(analysis['source_tables'])
        for i, table in enumerate(analysis['target_tables']):
            node_map[table] = f'node_{offset + i}'

        offset += len(analysis['target_tables'])
        for i, table in enumerate(analysis['temp_tables']):
            node_map[table] = f'node_{offset + i}'

        offset += len(analysis['temp_tables'])
        for i, view in enumerate(analysis['views']):
            node_map[view] = f'node_{offset + i}'

        # 基于数据流创建边
        for source, target, flow_type in analysis.get('data_flows', []):
            if source in node_map and target in node_map:
                edges.append({
                    'id': f'edge_{edge_id_counter}',
                    'source': node_map[source],
                    'target': node_map[target],
                    'type': flow_type
                })
                edge_id_counter += 1

        return edges

    def _analyze_kafka_topics(self, result: Dict, task_id: int, sql_statement: str,
                             connector_producers: Dict, connector_consumers: Dict):
        """Register this task as producer/consumer of the Kafka topics in its SQL.

        A table that is the target of INSERT INTO/OVERWRITE marks the task as
        the topic's producer; every other upsert-kafka table in the SQL marks
        it as a consumer. Results are written into the shared
        connector_producers / connector_consumers registries keyed by
        "kafka_topic:<topic>".
        """
        try:
            # Approach 1: collect kafka-backed tables (table name -> topic)
            # from the already-parsed node list.
            kafka_tables = {}  # table_name -> topic_name

            for node in result.get('nodes', []):
                with_options = node.get('with_options', {})
                if (with_options and isinstance(with_options, dict) and
                    with_options.get('connector') == 'upsert-kafka'):
                    topic = with_options.get('topic', '')
                    table_name = node.get('label', '').lower()
                    if topic and table_name:
                        kafka_tables[table_name] = topic

            # Approach 2: fall back to scanning the raw SQL when node parsing
            # yielded nothing.
            if not kafka_tables:
                kafka_tables = self._extract_kafka_tables_from_sql(sql_statement)
                logger.info(f"从SQL中提取到 {len(kafka_tables)} 个Kafka表: {kafka_tables}")

            # INSERT targets determine which topics this task produces.
            sql_upper = sql_statement.upper()
            insert_patterns = [
                r'INSERT\s+INTO\s+([`\w\.\-]+)',
                r'INSERT\s+OVERWRITE\s+([`\w\.\-]+)'
            ]

            produced_topics = set()
            for pattern in insert_patterns:
                matches = re.findall(pattern, sql_upper)
                for match in matches:
                    table_name = self._clean_table_name(match).lower()
                    if table_name in kafka_tables:
                        topic = kafka_tables[table_name]
                        produced_topics.add(topic)
                        connector_key = f"kafka_topic:{topic}"
                        connector_producers[connector_key] = task_id
                        logger.info(f"任务 {task_id} 生产 Kafka topic: {topic} (通过表 {table_name})")

            # Every remaining kafka table is read by this task -> consumer.
            for table_name, topic in kafka_tables.items():
                if topic not in produced_topics:
                    connector_key = f"kafka_topic:{topic}"
                    if connector_key not in connector_consumers:
                        connector_consumers[connector_key] = []
                    if task_id not in connector_consumers[connector_key]:
                        connector_consumers[connector_key].append(task_id)
                        logger.info(f"任务 {task_id} 消费 Kafka topic: {topic} (通过表 {table_name})")

        except Exception as e:
            logger.error(f"分析Kafka topics失败: {str(e)}")

    def _extract_kafka_tables_from_sql(self, sql_statement: str) -> Dict[str, str]:
        """Scan raw SQL for upsert-kafka CREATE TABLE statements; map table name -> topic."""
        kafka_tables: Dict[str, str] = {}
        try:
            # Capture every CREATE TABLE name together with its WITH(...) body.
            pattern = r"CREATE\s+TABLE\s+([`\w\.\-]+)\s*\(.*?\)\s*with\s*\(\s*(.*?)\s*\)"
            matches = re.findall(pattern, sql_statement, re.IGNORECASE | re.DOTALL)

            logger.info(f"在SQL中找到 {len(matches)} 个CREATE TABLE语句")

            for raw_name, with_clause in matches:
                table_name = self._clean_table_name(raw_name).lower()
                logger.info(f"分析表: {table_name}, WITH子句: {with_clause[:100]}...")

                # Only upsert-kafka connectors are of interest here.
                if not re.search(r"['\"]connector['\"]?\s*=\s*['\"]upsert-kafka['\"]", with_clause, re.IGNORECASE):
                    logger.debug(f"表 {table_name} 不是upsert-kafka连接器")
                    continue

                # Pull the topic option out of the WITH clause.
                topic_match = re.search(r"['\"]topic['\"]?\s*=\s*['\"]([^'\"]+)['\"]", with_clause, re.IGNORECASE)
                if topic_match:
                    topic = topic_match.group(1)
                    kafka_tables[table_name] = topic
                    logger.info(f"从SQL中提取Kafka表: {table_name} -> topic: {topic}")
                else:
                    logger.warning(f"表 {table_name} 是upsert-kafka连接器但未找到topic")

            return kafka_tables

        except Exception as e:
            logger.error(f"从SQL提取Kafka表失败: {str(e)}")
            return {}

    def _extract_connector_identifiers(self, sql: str, result: Dict) -> None:
        """Extract each table's real connector identifier (topic, table, index, path, ...).

        Parses every ``CREATE TABLE ... WITH (...)`` clause, reads the
        'connector' option, and records the connector-type-specific
        identifier in ``result['connector_identifiers']`` and in
        ``self.connectors`` (as "type:identifier").

        Fix: the kafka/upsert-kafka branches previously did an unguarded
        ``result['kafka_topics'].add(...)`` and raised KeyError when the key
        was absent, while the doris branch guarded its set; all result-set
        writes now use ``setdefault`` consistently.
        """
        try:
            logger.info("开始提取连接器实际标识符...")

            # CREATE TABLE name (...) WITH (...) — capture name and WITH body.
            create_table_pattern = r'CREATE\s+TABLE\s+([`\w\.\-]+)\s*\([^)]*\)\s*WITH\s*\((.*?)\)'
            matches = re.findall(create_table_pattern, sql, re.IGNORECASE | re.DOTALL)

            for table_match, with_clause in matches:
                table_name = self._clean_table_name(table_match).lower()
                logger.info(f"分析表 {table_name} 的连接器配置...")

                # Parse the WITH clause into a config dict.
                config = self._parse_with_options(with_clause)
                connector_type = config.get('connector', '').lower()
                if not connector_type:
                    continue

                # Pick the identifier appropriate for this connector type.
                identifier = None
                identifier_type = None

                if connector_type == 'kafka':
                    # Kafka: the topic is the shared identifier.
                    identifier = config.get('topic')
                    identifier_type = 'kafka_topic'
                    if identifier:
                        result.setdefault('kafka_topics', set()).add(identifier)

                elif connector_type == 'upsert-kafka':
                    # Upsert Kafka: also identified by its topic.
                    identifier = config.get('topic')
                    identifier_type = 'upsert_kafka_topic'
                    if identifier:
                        result.setdefault('upsert_kafka_topics', set()).add(identifier)

                elif connector_type == 'doris':
                    # Doris: prefer table.identifier, else build database.table.
                    identifier = config.get('table.identifier')
                    if not identifier:
                        database = config.get('fenodes.database') or config.get('database')
                        table = config.get('table.name') or config.get('table')
                        if database and table:
                            identifier = f"{database}.{table}"
                    identifier_type = 'doris_table'
                    if identifier:
                        result.setdefault('doris_tables', set()).add(identifier)

                elif connector_type == 'jdbc':
                    # JDBC: identified by table-name.
                    identifier = config.get('table-name')
                    identifier_type = 'jdbc_table'

                elif connector_type == 'elasticsearch-7':
                    # Elasticsearch: identified by its index.
                    identifier = config.get('index')
                    identifier_type = 'elasticsearch_index'

                elif connector_type == 'redis':
                    # Redis: identified by command or key pattern.
                    identifier = config.get('command') or config.get('key.pattern')
                    identifier_type = 'redis_key'

                elif connector_type == 'hbase-2.2':
                    # HBase: identified by table-name.
                    identifier = config.get('table-name')
                    identifier_type = 'hbase_table'

                elif connector_type == 'mongodb':
                    # MongoDB: identified by its collection.
                    identifier = config.get('collection')
                    identifier_type = 'mongodb_collection'

                elif connector_type == 'filesystem':
                    # Filesystem: identified by its path.
                    identifier = config.get('path')
                    identifier_type = 'filesystem_path'

                # Record the table -> connector-identifier mapping.
                if identifier and identifier_type:
                    result.setdefault('connector_identifiers', {})[table_name] = {
                        'connector_type': connector_type,
                        'identifier': identifier,
                        'identifier_type': identifier_type,
                        'config': config
                    }

                    # Global registry of "type:identifier" markers.
                    self.connectors.add(f"{identifier_type}:{identifier}")

                    logger.info(f"提取连接器标识符: 表={table_name}, 类型={connector_type}, 标识符={identifier}")
                else:
                    logger.warning(f"无法提取表 {table_name} 的连接器标识符，连接器类型: {connector_type}")

        except Exception as e:
            logger.error(f"提取连接器标识符失败: {str(e)}")

    def _determine_connector_role(self, table_name: str, sql: str, connector_info: Dict) -> str:
        """确定连接器在SQL中的角色（生产者/消费者）"""
        try:
            sql_upper = sql.upper()
            table_upper = table_name.upper()

            # 检查是否在INSERT INTO语句中作为目标表
            insert_pattern = rf'INSERT\s+INTO\s+{re.escape(table_upper)}'
            if re.search(insert_pattern, sql_upper):
                return 'producer'  # 生产者

            # 检查是否在FROM或JOIN子句中作为源表
            from_pattern = rf'FROM\s+{re.escape(table_upper)}'
            join_pattern = rf'JOIN\s+{re.escape(table_upper)}'
            if re.search(from_pattern, sql_upper) or re.search(join_pattern, sql_upper):
                return 'consumer'  # 消费者

            # 默认根据连接器类型判断
            connector_type = connector_info.get('connector_type', '').lower()
            if connector_type in ['kafka', 'upsert-kafka']:
                # Kafka通常是消费者，除非明确是sink
                if 'sink' in connector_info.get('config', {}):
                    return 'producer'
                return 'consumer'
            elif connector_type in ['doris', 'jdbc', 'elasticsearch-7']:
                # 数据库通常是生产者
                return 'producer'

            return 'unknown'

        except Exception as e:
            logger.error(f"确定连接器角色失败: {str(e)}")
            return 'unknown'

    def _extract_and_analyze_connectors(self, task_id: int, sql_statement: str, result: Dict,
                                      connector_producers: Dict, connector_consumers: Dict) -> None:
        """Extract connector identifiers from the SQL and register each one as a
        producer or consumer of the given task in the supplied mappings."""
        try:
            logger.info(f"分析任务 {task_id} 的连接器标识符...")

            # Re-extract connector identifiers straight from the SQL so the
            # data stays consistent with the statement being analysed.
            identifiers = self._extract_connector_identifiers_direct(sql_statement)
            if not identifiers:
                logger.warning(f"任务 {task_id} 未找到连接器标识符")
                return

            for tbl, info in identifiers.items():
                connector_key = f"{info['identifier_type']}:{info['identifier']}"

                # Producer (INSERT target) vs consumer (FROM/JOIN source).
                role = self._determine_connector_role(tbl, sql_statement, info)

                if role == 'producer':
                    connector_producers[connector_key] = task_id
                    logger.info(f"任务 {task_id} 生产连接器: {connector_key}")
                elif role == 'consumer':
                    connector_consumers.setdefault(connector_key, []).append(task_id)
                    logger.info(f"任务 {task_id} 消费连接器: {connector_key}")
                else:
                    logger.warning(f"任务 {task_id} 的连接器 {connector_key} 角色未知")

        except Exception as e:
            logger.error(f"提取和分析连接器失败: {str(e)}")

    def _extract_connector_identifiers_direct(self, sql: str) -> Dict:
        """Extract connector identifiers straight from CREATE TABLE ... WITH (...) clauses.

        Returns a mapping of lower-cased table name to connector metadata
        (connector type, physical identifier, identifier type, raw config).
        """
        try:
            connector_identifiers = {}

            logger.info(f"开始直接提取连接器标识符，SQL长度: {len(sql)}")

            # CREATE TABLE statements with a WITH clause; the pattern tolerates
            # multi-line bodies and one level of nested parentheses.
            create_table_pattern = r'CREATE\s+TABLE\s+([`\w\.\-]+)\s*\([^)]*(?:\([^)]*\)[^)]*)*\)\s*WITH\s*\(((?:[^()]|\([^)]*\))*)\)'
            matches = re.findall(create_table_pattern, sql, re.IGNORECASE | re.DOTALL)

            logger.info(f"找到 {len(matches)} 个CREATE TABLE语句")

            # connector type -> (candidate config keys in priority order, identifier type)
            simple_connectors = {
                'kafka': (('topic',), 'kafka_topic'),
                'upsert-kafka': (('topic',), 'upsert_kafka_topic'),
                'jdbc': (('table-name',), 'jdbc_table'),
                'elasticsearch-7': (('index',), 'elasticsearch_index'),
                'redis': (('command', 'key.pattern'), 'redis_key'),
                'hbase-2.2': (('table-name',), 'hbase_table'),
                'mongodb': (('collection',), 'mongodb_collection'),
                'filesystem': (('path',), 'filesystem_path'),
            }

            for table_match, with_clause in matches:
                table_name = self._clean_table_name(table_match).lower()
                logger.info(f"处理表 {table_name}，WITH子句长度: {len(with_clause)}")

                # Parse the key/value options declared in the WITH clause.
                config = self._parse_with_options(with_clause)
                connector_type = config.get('connector', '').lower()

                logger.info(f"表 {table_name} 的连接器类型: {connector_type}")

                if not connector_type:
                    logger.warning(f"表 {table_name} 没有连接器类型")
                    continue

                identifier = None
                identifier_type = None

                if connector_type == 'doris':
                    # Doris: prefer the explicit table.identifier, otherwise
                    # assemble "<database>.<table>" from the separate options.
                    identifier = config.get('table.identifier')
                    if not identifier:
                        database = config.get('fenodes.database') or config.get('database')
                        table = config.get('table.name') or config.get('table')
                        if database and table:
                            identifier = f"{database}.{table}"
                    identifier_type = 'doris_table'
                elif connector_type in simple_connectors:
                    # Generic case: first truthy value among the candidate keys.
                    keys, identifier_type = simple_connectors[connector_type]
                    identifier = next((v for v in (config.get(k) for k in keys) if v), None)

                # Record the mapping only when an identifier was resolved.
                if identifier and identifier_type:
                    connector_identifiers[table_name] = {
                        'connector_type': connector_type,
                        'identifier': identifier,
                        'identifier_type': identifier_type,
                        'config': config
                    }
                    logger.info(f"直接提取连接器标识符: 表={table_name}, 类型={connector_type}, 标识符={identifier}")

            return connector_identifiers

        except Exception as e:
            logger.error(f"直接提取连接器标识符失败: {str(e)}")
            return {}

    def _parse_connector_identifier(self, connector_identifier: str) -> tuple:
        """解析连接器标识符，返回(连接器类型, 实际标识符)"""
        try:
            if ':' in connector_identifier:
                connector_type, actual_identifier = connector_identifier.split(':', 1)
                return connector_type, actual_identifier
            else:
                return 'unknown', connector_identifier
        except Exception as e:
            logger.error(f"解析连接器标识符失败: {str(e)}")
            return 'unknown', connector_identifier

    def _extract_task_connectors(self, task_id: int, analysis: Dict,
                               connector_producers: Dict, connector_consumers: Dict) -> List[Dict]:
        """Build the list of connector descriptors (with resolved roles) for one task."""
        try:
            sql_statement = analysis.get('sql', '')
            if not sql_statement:
                logger.warning(f"任务 {task_id} 没有SQL语句")
                return []

            # Re-extract connector identifiers from the SQL for consistency.
            identifiers = self._extract_connector_identifiers_direct(sql_statement)

            descriptors = []
            for tbl, info in identifiers.items():
                key = f"{info['identifier_type']}:{info['identifier']}"

                # Role as recorded during the producer/consumer analysis pass.
                if connector_producers.get(key) == task_id:
                    role = 'producer'
                elif task_id in connector_consumers.get(key, ()):
                    role = 'consumer'
                else:
                    role = 'unknown'

                descriptors.append({
                    'table_name': tbl,
                    'connector_type': info['connector_type'],
                    'identifier': info['identifier'],
                    'identifier_type': info['identifier_type'],
                    'role': role,
                    'config': info.get('config', {})
                })

            return descriptors

        except Exception as e:
            logger.error(f"提取任务连接器信息失败: {str(e)}")
            return []

    def build_task_level_lineage_graph_enhanced(self, task_sql_map: Dict[int, str], task_name_map: Dict[int, str] = None) -> Dict[str, Any]:
        """
        Build a task-level data lineage graph (enhanced version) that links tasks
        through the *actual* connector identifiers (Kafka topics, Doris tables,
        ...) instead of logical table names.

        Args:
            task_sql_map: Mapping of task ID to its SQL statement {task_id: sql_statement}.
            task_name_map: Optional mapping of task ID to a display name.

        Returns:
            Task-level lineage info: task nodes, producer->consumer edges derived
            from shared connector identifiers, per-task internal lineage and
            summary statistics. Falls back to the non-enhanced builder on error.
        """
        try:
            logger.info(f"开始构建增强版任务级别血缘图，共 {len(task_sql_map)} 个任务")

            # Per-task parse results keyed by task ID.
            task_analysis = {}
            connector_producers = {}  # connector_identifier -> task_id
            connector_consumers = {}  # connector_identifier -> [task_ids]

            # Step 1: parse every task's SQL and classify each connector
            # identifier as produced or consumed by that task.
            for task_id, sql_statement in task_sql_map.items():
                logger.info(f"解析任务 {task_id} 的连接器配置")

                # Reset parser state so results do not leak between tasks.
                self._reset()

                result = self.parse_flink_sql_advanced(sql_statement)
                result['sql'] = sql_statement
                task_analysis[task_id] = result

                # Connector identifiers come from the Flink-specific parsing strategy.
                connector_identifiers = result.get('parsing_strategies', {}).get('flink_specific', {}).get('connector_identifiers', {})

                for table_name, connector_info in connector_identifiers.items():
                    identifier = connector_info['identifier']
                    identifier_type = connector_info['identifier_type']
                    connector_key = f"{identifier_type}:{identifier}"

                    # Producer (INSERT target) vs consumer (FROM/JOIN source).
                    role = self._determine_connector_role(table_name, sql_statement, connector_info)

                    if role == 'producer':
                        connector_producers[connector_key] = task_id
                        logger.info(f"任务 {task_id} 生产连接器: {connector_key}")
                    elif role == 'consumer':
                        if connector_key not in connector_consumers:
                            connector_consumers[connector_key] = []
                        connector_consumers[connector_key].append(task_id)
                        logger.info(f"任务 {task_id} 消费连接器: {connector_key}")
                    else:
                        logger.warning(f"任务 {task_id} 的连接器 {connector_key} 角色未知")

            # Step 2: connect producer task -> consumer task whenever both
            # reference the same connector identifier (self-loops skipped).
            task_edges = []

            for connector_key, producer_task in connector_producers.items():
                if connector_key in connector_consumers:
                    for consumer_task in connector_consumers[connector_key]:
                        if producer_task != consumer_task:
                            # connector_key has the form "<identifier_type>:<identifier>".
                            connector_type, identifier = connector_key.split(':', 1)

                            task_edges.append({
                                'id': f'task_edge_{producer_task}_{consumer_task}_{len(task_edges)}',
                                'source': f'task_{producer_task}',
                                'target': f'task_{consumer_task}',
                                'type': 'task_dependency',
                                'dependency_type': connector_type,
                                'connector_id': identifier,
                                'connector_type': connector_type,
                                'common_tables': []
                            })
                            logger.info(f"建立任务连接: 任务{producer_task} -> 任务{consumer_task} (连接器: {identifier}, 类型: {connector_type})")

            # Step 3: build one node per task with its connector summary and
            # internal (table-level) lineage.
            task_nodes = []
            for task_id in task_sql_map.keys():
                analysis = task_analysis[task_id]
                task_name = task_name_map.get(task_id, f'Task {task_id}') if task_name_map else f'Task {task_id}'

                # Collect the task's connector information for display.
                connector_identifiers = analysis.get('parsing_strategies', {}).get('flink_specific', {}).get('connector_identifiers', {})
                connectors = []
                for table_name, connector_info in connector_identifiers.items():
                    connectors.append({
                        'table_name': table_name,
                        'connector_type': connector_info['connector_type'],
                        'identifier': connector_info['identifier'],
                        'identifier_type': connector_info['identifier_type']
                    })

                task_nodes.append({
                    'id': f'task_{task_id}',
                    'task_id': task_id,
                    'task_name': task_name,
                    'label': task_name,
                    'type': 'task',
                    'node_count': len(analysis['source_tables']) + len(analysis['target_tables']),
                    'edge_count': len(analysis.get('data_flows', [])),
                    'source_tables': list(analysis['source_tables']),
                    'target_tables': list(analysis['target_tables']),
                    'connectors': connectors,
                    'internal_nodes': self._build_internal_nodes(analysis),
                    'internal_edges': self._build_internal_edges(analysis)
                })

            logger.info(f"增强版任务级别血缘图构建完成: {len(task_nodes)} 个任务节点, {len(task_edges)} 条任务连接")

            return {
                'task_level_nodes': task_nodes,
                'task_level_edges': task_edges,
                'task_lineage_results': [
                    {
                        'task_id': task_id,
                        'task_name': task_name_map.get(task_id, f'Task {task_id}') if task_name_map else f'Task {task_id}',
                        'nodes': self._build_internal_nodes(analysis),
                        'edges': self._build_internal_edges(analysis),
                        'node_count': len(analysis['source_tables']) + len(analysis['target_tables']),
                        'edge_count': len(analysis.get('data_flows', []))
                    }
                    for task_id, analysis in task_analysis.items()
                ],
                'statistics': {
                    'total_tasks': len(task_sql_map),
                    'total_task_connections': len(task_edges),
                    'connector_based_connections': len(task_edges),
                    'unique_connectors': len(set(connector_producers.keys()) | set(connector_consumers.keys()))
                }
            }

        except Exception as e:
            logger.error(f"构建增强版任务级别血缘图失败: {str(e)}")
            # Fall back to the table-name-based (non-enhanced) builder.
            return self.build_task_level_lineage_graph(task_sql_map, task_name_map)
