from neo4j import GraphDatabase
import mysql.connector
from mysql.connector import Error
import logging
import getpass
import time
from collections import defaultdict
import re

# Logging setup: INFO level, mirrored to a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('multi_column_relationship_builder.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-level logger shared by every class in this file.
logger = logging.getLogger(__name__)

class MultiColumnRelationshipAnalyzer:
    """Heuristic analyzer that infers multi-column relationships in a schema.

    Works purely on table/column names (no data access): it pairs related
    columns inside one table (start/end, parent/child, ...), matches columns
    across tables, and flags composite-key style references.
    """

    def __init__(self):
        # Category -> list of (regex_a, regex_b) pairs used to pair column names.
        self.relationship_patterns = self._initialize_relationship_patterns()

    def _initialize_relationship_patterns(self):
        """Return the regex pairs used to detect related column names."""
        return {
            # Composite-key style suffixes (id / code / no, plus CN variants).
            'composite_keys': [
                (r'(.+?)_id$', r'(.+?)_id$'),
                (r'(.+?)_code$', r'(.+?)_code$'),
                (r'(.+?)_no$', r'(.+?)_no$'),
                (r'(.+?)编号$', r'(.+?)编号$'),
                (r'(.+?)代码$', r'(.+?)代码$')
            ],
            # Parent/child column pairs.
            'parent_child': [
                (r'parent_(.+?)$', r'child_(.+?)$'),
                (r'主_(.+?)$', r'子_(.+?)$'),
                (r'上级_(.+?)$', r'下级_(.+?)$')
            ],
            # Temporal ranges (start/end, create/update).
            'temporal': [
                (r'start_(.+?)$', r'end_(.+?)$'),
                (r'begin_(.+?)$', r'end_(.+?)$'),
                (r'开始_(.+?)$', r'结束_(.+?)$'),
                (r'create_(.+?)$', r'update_(.+?)$'),
                (r'创建_(.+?)$', r'更新_(.+?)$')
            ],
            # Spatial / directional pairs (from/to, source/target).
            'spatial': [
                (r'from_(.+?)$', r'to_(.+?)$'),
                (r'source_(.+?)$', r'target_(.+?)$'),
                (r'源_(.+?)$', r'目标_(.+?)$'),
                (r'起点_(.+?)$', r'终点_(.+?)$')
            ],
            # State transitions (old/new, previous/current).
            'status': [
                (r'old_(.+?)$', r'new_(.+?)$'),
                (r'previous_(.+?)$', r'current_(.+?)$'),
                (r'旧_(.+?)$', r'新_(.+?)$'),
                (r'原_(.+?)$', r'现_(.+?)$')
            ]
        }

    def analyze_multi_column_relationships(self, schema):
        """Return all inferred relationships for every table in *schema*.

        *schema* is the dict produced by the extractor:
        ``{"tables": {name: {"columns": [...], "primary_keys": [...], ...}}}``.
        Each result is a dict with source/target table+column, type,
        confidence, description and inference_method keys.
        """
        multi_column_relationships = []
        for table_name, table_info in schema["tables"].items():
            relationships = self._analyze_table_columns(table_name, table_info, schema)
            multi_column_relationships.extend(relationships)
        return multi_column_relationships

    def _analyze_table_columns(self, table_name, table_info, schema):
        """Collect internal, cross-table and composite-key relationships for one table."""
        relationships = []
        columns = table_info["columns"]

        # Pairs of related columns within the table itself.
        relationships.extend(self._analyze_internal_column_relationships(columns, table_name))
        # Column matches against every other table.
        relationships.extend(self._analyze_external_multi_column_relationships(table_name, table_info, schema))
        # References implied by candidate composite keys.
        relationships.extend(self._analyze_composite_key_relationships(table_name, table_info, schema))

        return relationships

    def _analyze_internal_column_relationships(self, columns, table_name):
        """Pair related columns inside one table (e.g. start_date / end_date)."""
        relationships = []
        column_names = [col["name"] for col in columns]

        for pattern_type, patterns in self.relationship_patterns.items():
            for pattern1, pattern2 in patterns:
                matched_pairs = self._find_matching_column_pairs(column_names, pattern1, pattern2)

                for col1, col2 in matched_pairs:
                    relationships.append({
                        "source_table": table_name,
                        "source_column": col1,
                        "target_table": table_name,
                        "target_column": col2,
                        "relationship_type": f"internal_{pattern_type}",
                        "confidence": "medium",
                        "description": f"表内{pattern_type}关系: {col1} <-> {col2}",
                        "inference_method": "multi_column_analysis"
                    })

        return relationships

    def _analyze_external_multi_column_relationships(self, source_table, source_table_info, schema):
        """Match this table's columns against every other table's columns."""
        relationships = []
        source_columns = [col["name"] for col in source_table_info["columns"]]

        for target_table, target_table_info in schema["tables"].items():
            if target_table == source_table:
                continue

            target_columns = [col["name"] for col in target_table_info["columns"]]
            column_pairs = self._find_matching_column_pairs_between_tables(
                source_columns, target_columns, source_table, target_table
            )

            for source_col, target_col, relationship_type in column_pairs:
                relationships.append({
                    "source_table": source_table,
                    "source_column": source_col,
                    "target_table": target_table,
                    "target_column": target_col,
                    "relationship_type": relationship_type,
                    "confidence": "medium",
                    "description": f"多列{relationship_type}关系: {source_col} -> {target_col}",
                    "inference_method": "multi_column_analysis"
                })

        return relationships

    def _analyze_composite_key_relationships(self, table_name, table_info, schema):
        """Emit one reference per column of every candidate composite key."""
        relationships = []
        composite_key_candidates = self._find_composite_key_candidates(table_info["columns"])

        for composite_key in composite_key_candidates:
            for column_name in composite_key:
                target_info = self._find_related_table_for_column(column_name, table_name, schema)
                if target_info:
                    relationships.append({
                        "source_table": table_name,
                        "source_column": column_name,
                        "target_table": target_info["table"],
                        "target_column": target_info["column"],
                        "relationship_type": "composite_key",
                        "confidence": target_info["confidence"],
                        "description": f"复合键关系: {column_name} 引用 {target_info['table']}.{target_info['column']}",
                        "inference_method": "composite_key_analysis"
                    })

        return relationships

    def _find_matching_column_pairs(self, column_names, pattern1, pattern2):
        """Pair columns matching *pattern1*/*pattern2* that share the same base name."""
        pairs = []

        matches1 = [col for col in column_names if re.match(pattern1, col, re.IGNORECASE)]
        matches2 = [col for col in column_names if re.match(pattern2, col, re.IGNORECASE)]

        for col1 in matches1:
            base1 = self._extract_column_base(col1, pattern1)
            for col2 in matches2:
                base2 = self._extract_column_base(col2, pattern2)
                if base1 and base2 and base1 == base2 and col1 != col2:
                    pairs.append((col1, col2))

        return pairs

    def _find_matching_column_pairs_between_tables(self, source_columns, target_columns, source_table, target_table):
        """Return (source, target, type) triples for related cross-table columns."""
        pairs = []

        for source_col in source_columns:
            for target_col in target_columns:
                if self._are_columns_related(source_col, target_col, source_table, target_table):
                    relationship_type = self._determine_relationship_type(source_col, target_col)
                    pairs.append((source_col, target_col, relationship_type))

        return pairs

    def _find_composite_key_candidates(self, columns):
        """Return pairs of id-like columns as naive composite-key candidates."""
        id_columns = [col["name"] for col in columns if self._is_id_like_column(col["name"])]

        candidates = []
        if len(id_columns) >= 2:
            # Naive pairwise combination; real composite-key detection would
            # need index/constraint metadata.
            for i in range(len(id_columns)):
                for j in range(i + 1, len(id_columns)):
                    candidates.append([id_columns[i], id_columns[j]])

        return candidates

    def _find_related_table_for_column(self, column_name, current_table, schema):
        """Find a table whose PK/unique column this column likely references.

        Returns ``{"table", "column", "confidence"}`` or None. Primary keys
        are checked first and get higher confidence than unique columns.
        """
        for table_name, table_info in schema["tables"].items():
            if table_name == current_table:
                continue

            for pk in table_info["primary_keys"]:
                if self._are_columns_related(column_name, pk, current_table, table_name):
                    return {
                        "table": table_name,
                        "column": pk,
                        "confidence": "high"
                    }

            for column in table_info["columns"]:
                if (column["is_primary"] or column["is_unique"]) and self._are_columns_related(column_name, column["name"], current_table, table_name):
                    return {
                        "table": table_name,
                        "column": column["name"],
                        "confidence": "medium"
                    }

        return None

    def _extract_column_base(self, column_name, pattern):
        """Return the first capture group of *pattern* applied to *column_name*, or None."""
        match = re.match(pattern, column_name, re.IGNORECASE)
        if match:
            return match.group(1) if match.groups() else None
        return None

    def _are_columns_related(self, col1, col2, table1, table2):
        """Heuristically decide whether *col1* and *col2* name the same key.

        True when the names match exactly (case-insensitive), when one name
        equals the other qualified with its table's base name (``user_id``
        vs ``users.id``), or when both carry the same key suffix
        (_id/_code/_no) with an identical base.
        """
        if col1.lower() == col2.lower():
            return True

        t1_base = self._extract_table_base(table1).lower()
        t2_base = self._extract_table_base(table2).lower()

        # Compare table-qualified spellings of the two columns against each
        # other. (Bug fix: the original compared each column name against a
        # string built from that same name plus a prefix, which could never
        # be equal, so this path was dead code.)
        qualified_pairs = [
            (f"{t1_base}_{col1}", f"{t2_base}_{col2}"),
            (col1, f"{t2_base}_{col2}"),
            (f"{t1_base}_{col1}", col2),
        ]
        for name1, name2 in qualified_pairs:
            if name1.lower() == name2.lower():
                return True

        # Same key-style suffix with the same base, e.g. user_id vs user_id.
        col_patterns = [
            (r'(.+?)_id$', r'(.+?)_id$'),
            (r'(.+?)_code$', r'(.+?)_code$'),
            (r'(.+?)_no$', r'(.+?)_no$'),
        ]
        for pattern1, pattern2 in col_patterns:
            match1 = re.match(pattern1, col1, re.IGNORECASE)
            match2 = re.match(pattern2, col2, re.IGNORECASE)
            if match1 and match2:
                base1 = match1.group(1) if match1.groups() else None
                base2 = match2.group(1) if match2.groups() else None
                if base1 and base2 and base1 == base2:
                    return True

        return False

    def _determine_relationship_type(self, source_col, target_col):
        """Classify a column pair as temporal/spatial/status/parent_child/general."""
        source_lower = source_col.lower()
        target_lower = target_col.lower()

        # Temporal (start/create -> end/update).
        if any(word in source_lower for word in ['start', 'begin', 'create', '开始', '创建']) and \
           any(word in target_lower for word in ['end', 'update', '结束', '更新']):
            return "temporal"

        # Spatial / directional (from/source -> to/target).
        if any(word in source_lower for word in ['from', 'source', '源', '起点']) and \
           any(word in target_lower for word in ['to', 'target', '目标', '终点']):
            return "spatial"

        # State transition (old/previous -> new/current).
        if any(word in source_lower for word in ['old', 'previous', '旧', '原']) and \
           any(word in target_lower for word in ['new', 'current', '新', '现']):
            return "status"

        # Hierarchy (parent -> child).
        if any(word in source_lower for word in ['parent', '主', '上级']) and \
           any(word in target_lower for word in ['child', '子', '下级']):
            return "parent_child"

        return "general"

    def _is_id_like_column(self, column_name):
        """True when the column name looks like an id/number/code column."""
        patterns = [
            r'.*_id$', r'.*Id$', r'.*ID$', r'.*_no$', r'.*_code$',
            r'.*编号$', r'.*代码$', r'.*号$', r'.*标识$'
        ]
        return any(re.match(pattern, column_name, re.IGNORECASE) for pattern in patterns)

    def _extract_table_base(self, table_name):
        """Strip common table-name prefixes/suffixes to get the base entity name."""
        suffixes = ['表', '信息', '数据', '记录', '详情', '列表', '计划', 's', 'es']
        base_name = table_name

        for suffix in suffixes:
            if base_name.lower().endswith(suffix.lower()):
                base_name = base_name[:-len(suffix)]
                break

        prefixes = ['tbl_', 'tab_', 'table_', 'tb_']
        for prefix in prefixes:
            if base_name.lower().startswith(prefix.lower()):
                base_name = base_name[len(prefix):]
                break

        # The original conditionally returned table_name, but only in the case
        # where base_name was already equal to it — so return base_name directly.
        return base_name

class SameColumnNameRelationshipAnalyzer:
    """Infers relationships between tables that share a column name.

    A shared name such as ``user_id`` appearing as a PK/unique column in
    one table and as a plain column in another is treated as a likely join
    key even when no foreign-key constraint exists.
    """

    def __init__(self):
        # Names commonly used as join keys (compared case-insensitively).
        self.common_join_columns = {
            'id', 'code', 'no', 'number', 'key', 'ref', 'link',
            '标识', '编号', '代码', '号', '关键字', '关联', '关系'
        }

    def analyze_database(self, schema):
        """Return inferred relationships for every shared join-candidate column."""
        same_column_relationships = []

        column_map = self._build_column_map(schema)

        # Only names that appear in 2+ tables and look like keys are considered.
        for column_name, table_columns in column_map.items():
            if len(table_columns) > 1 and self._is_join_candidate(column_name):
                relationships = self._find_potential_relationships(table_columns, schema)
                same_column_relationships.extend(relationships)

        return same_column_relationships

    def _build_column_map(self, schema):
        """Map each column name to the list of tables/columns carrying it."""
        column_map = defaultdict(list)

        for table_name, table_info in schema["tables"].items():
            for column in table_info["columns"]:
                column_map[column["name"]].append({
                    "table": table_name,
                    "column": column["name"],
                    "is_primary": column["is_primary"],
                    "is_unique": column["is_unique"],
                    "data_type": column["type"],
                    "table_info": table_info
                })

        return column_map

    def _is_join_candidate(self, column_name):
        """True when the column name looks like a key/join column."""
        column_lower = column_name.lower()

        id_patterns = [
            r'.*_id$', r'.*Id$', r'.*ID$', r'.*_no$', r'.*_code$',
            r'.*编号$', r'.*代码$', r'.*号$', r'.*标识$', r'^id$',
            r'.*key$', r'.*Key$', r'.*KEY$', r'.*_ref$', r'.*_link$'
        ]

        # Bug fix: the patterns were previously matched only against the
        # lowercased name, so the camel-case variants (.*Id$, .*Key$) were
        # dead and names such as "userId" were rejected. Try the original
        # spelling as well (keeping the lowercase pass preserves all matches
        # the old code produced).
        is_id_pattern = any(
            re.match(pattern, column_name) or re.match(pattern, column_lower)
            for pattern in id_patterns
        )
        # Consistency fix: reuse the set declared in __init__ instead of a
        # duplicated (and slightly narrower) local copy.
        is_common_keyword = column_lower in self.common_join_columns

        return is_id_pattern or is_common_keyword

    def _find_potential_relationships(self, table_columns, schema):
        """Link plain occurrences of a shared column to its PK/unique occurrences."""
        relationships = []

        primary_columns = [col for col in table_columns if col["is_primary"] or col["is_unique"]]
        non_primary_columns = [col for col in table_columns if not col["is_primary"] and not col["is_unique"]]

        # A relationship needs a key side (target) and a plain side (source).
        if primary_columns and non_primary_columns:
            for primary_col in primary_columns:
                for non_primary_col in non_primary_columns:
                    # Skip self-references within the same table.
                    if primary_col["table"] != non_primary_col["table"]:
                        if self._are_types_compatible(primary_col["data_type"], non_primary_col["data_type"]):
                            relationships.append({
                                "source_table": non_primary_col["table"],
                                "source_column": non_primary_col["column"],
                                "target_table": primary_col["table"],
                                "target_column": primary_col["column"],
                                "relationship_type": "same_column_name",
                                "confidence": self._calculate_confidence(primary_col, non_primary_col),
                                "description": f"相同列名 '{non_primary_col['column']}' 关联",
                                "inference_method": "same_column_name"
                            })

        return relationships

    def _are_types_compatible(self, type1, type2):
        """True when the two SQL type strings normalize to compatible families."""
        def normalize(data_type):
            data_type = data_type.lower()
            if any(t in data_type for t in ['int', 'integer']):
                return 'integer'
            elif any(t in data_type for t in ['char', 'text', 'varchar']):
                return 'string'
            elif any(t in data_type for t in ['date', 'time']):
                return 'datetime'
            elif any(t in data_type for t in ['decimal', 'float', 'double', 'numeric']):
                return 'numeric'
            else:
                return 'other'

        norm1, norm2 = normalize(type1), normalize(type2)

        # integer <-> numeric is allowed in both directions; every other
        # family must match exactly ('other' never matches).
        compatible = [
            ('integer', 'integer'),
            ('string', 'string'),
            ('datetime', 'datetime'),
            ('numeric', 'numeric'),
            ('integer', 'numeric'),
            ('numeric', 'integer')
        ]

        return (norm1, norm2) in compatible

    def _calculate_confidence(self, primary_col, non_primary_col):
        """Grade the inferred relationship as low / medium / high."""
        confidence = "low"

        # A key-style suffix on the source column suggests a real reference.
        source_col_name = non_primary_col["column"].lower()
        if any(pattern in source_col_name for pattern in ['_id', '_no', '_code', '_ref']):
            confidence = "medium"

        # A true PK whose table name is embedded in the source column name
        # is the strongest signal.
        if (primary_col["is_primary"] and 
            self._is_likely_foreign_key(source_col_name, primary_col["table"])):
            confidence = "high"

        return confidence

    def _is_likely_foreign_key(self, column_name, target_table):
        """True when the column name embeds the target table's base name + key suffix."""
        table_base = self._extract_table_base(target_table).lower()
        column_lower = column_name.lower()

        patterns = [
            f"{table_base}id",
            f"{table_base}_id",
            f"{table_base}no",
            f"{table_base}_no",
            f"{table_base}code",
            f"{table_base}_code"
        ]

        return any(pattern in column_lower for pattern in patterns)

    def _extract_table_base(self, table_name):
        """Strip common table-name prefixes/suffixes to get the base entity name."""
        suffixes = ['表', '信息', '数据', '记录', '详情', '列表', '计划', 's', 'es']
        base_name = table_name

        for suffix in suffixes:
            if base_name.lower().endswith(suffix.lower()):
                base_name = base_name[:-len(suffix)]
                break

        prefixes = ['tbl_', 'tab_', 'table_', 'tb_']
        for prefix in prefixes:
            if base_name.lower().startswith(prefix.lower()):
                base_name = base_name[len(prefix):]
                break

        # The original conditionally returned table_name, but only in the case
        # where base_name was already equal to it — so return base_name directly.
        return base_name

class EnhancedColumnLevelGraphBuilder:
    def __init__(self, mysql_config, neo4j_config):
        """Store the MySQL settings, open the Neo4j driver and set up analyzers.

        *mysql_config* is passed straight to ``mysql.connector.connect``;
        *neo4j_config* must provide 'uri', 'user' and 'password'.
        """
        self.mysql_config = mysql_config
        auth_pair = (neo4j_config['user'], neo4j_config['password'])
        self.neo4j_driver = GraphDatabase.driver(neo4j_config['uri'], auth=auth_pair)
        # Per-category counters for inferred relationships.
        self.relationship_stats = defaultdict(int)
        # Helper analyzers for the name-based inference strategies.
        self.same_column_analyzer = SameColumnNameRelationshipAnalyzer()
        self.multi_column_analyzer = MultiColumnRelationshipAnalyzer()
    
    def close(self):
        """Close the Neo4j driver and release its connection pool."""
        self.neo4j_driver.close()
    
    def get_all_databases(self):
        """Return the names of all non-system databases on the MySQL server.

        Returns an empty list (and logs the error) when the server cannot be
        queried. The connection and cursor are always closed, including on
        error — the original leaked both when an exception was raised.
        """
        try:
            connection = mysql.connector.connect(**self.mysql_config)
            try:
                cursor = connection.cursor()
                try:
                    cursor.execute("SHOW DATABASES")
                    databases = [db[0] for db in cursor.fetchall()]
                finally:
                    cursor.close()
            finally:
                connection.close()

            # NOTE(review): 'testdb' is filtered alongside MySQL's system
            # schemas — confirm this exclusion is intentional.
            system_dbs = ['information_schema', 'mysql', 'performance_schema', 'sys', 'testdb']
            user_databases = [db for db in databases if db not in system_dbs]

            logger.info(f"发现 {len(user_databases)} 个用户数据库")
            return user_databases

        except Error as e:
            logger.error(f"获取数据库列表失败: {e}")
            return []
    
    def extract_enhanced_schema(self, database_name):
        """Extract table/column structure for *database_name* and infer relationships.

        Returns the schema dict (tables, columns, primary keys, inferred
        foreign keys, table-name patterns), or None on a MySQL error. The
        connection and cursor are closed even when an error occurs — the
        original leaked both on failure.
        """
        connection = None
        cursor = None
        try:
            config = self.mysql_config.copy()
            config['database'] = database_name
            connection = mysql.connector.connect(**config)
            cursor = connection.cursor()

            schema = {
                "database": database_name,
                "tables": {},
                "table_name_patterns": defaultdict(list)
            }

            cursor.execute("SHOW TABLES")
            tables = [table[0] for table in cursor.fetchall()]

            for table in tables:
                schema["tables"][table] = {
                    "columns": [],
                    "primary_keys": [],
                    "foreign_keys": [],
                    "indexes": []
                }

                # DESCRIBE rows are (Field, Type, Null, Key, Default, Extra).
                cursor.execute(f"DESCRIBE `{table}`")
                for column in cursor.fetchall():
                    column_name = column[0]
                    data_type = column[1]
                    is_nullable = column[2]
                    column_key = column[3]  # 'PRI', 'UNI' or 'MUL'
                    column_default = column[4]
                    extra = column[5]

                    schema["tables"][table]["columns"].append({
                        "name": column_name,
                        "type": data_type,
                        "nullable": is_nullable == "YES",
                        "default": column_default,
                        "extra": extra,
                        "is_primary": column_key == "PRI",
                        "is_unique": column_key == "UNI",
                        "is_foreign": column_key == "MUL"
                    })

                    if column_key == "PRI":
                        schema["tables"][table]["primary_keys"].append(column_name)

                # Classify the table name by its suffix (信息/记录/...).
                self._analyze_table_name_patterns(table, schema)

            # Run every relationship-inference strategy over the schema.
            self._enhanced_relationship_analysis(schema, cursor, database_name)

            total_columns = self._count_total_columns(schema)
            total_relationships = sum(len(table_info["foreign_keys"]) for table_info in schema["tables"].values())

            logger.info(f"数据库 '{database_name}': {len(tables)} 张表, {total_columns} 个列, {total_relationships} 个推断关系")
            return schema

        except Error as e:
            logger.error(f"提取数据库 '{database_name}' 结构失败: {e}")
            return None
        finally:
            if cursor is not None:
                cursor.close()
            if connection is not None:
                connection.close()
    
    def _analyze_table_name_patterns(self, table_name, schema):
        """分析表名模式"""
        # 中文表名分析
        chinese_patterns = {
            '信息$': 'info',
            '表$': 'table', 
            '数据$': 'data',
            '记录$': 'record',
            '详情$': 'detail',
            '列表$': 'list',
            '计划$': 'plan',
            '关系$': 'relation',
            '映射$': 'mapping'
        }
        
        for pattern, category in chinese_patterns.items():
            if re.search(pattern, table_name):
                schema["table_name_patterns"][category].append(table_name)
                break
    
    def _enhanced_relationship_analysis(self, schema, cursor, database_name):
        """Run every relationship-inference strategy over *schema* in order.

        ``cursor`` and ``database_name`` are currently unused by the
        strategies but kept for interface stability.
        """
        strategies = (
            self._pattern_based_relationship_analysis,    # 1: column-name patterns
            self._semantic_based_relationship_analysis,   # 2: table-name semantics
            self._many_to_many_relationship_analysis,     # 3: junction-table detection
            self._data_type_based_relationship_analysis,  # 4: data-type matching
            self._same_column_name_relationship_analysis, # 5: identical column names
            self._multi_column_relationship_analysis,     # 6: multi-column patterns
        )
        for strategy in strategies:
            strategy(schema)
    
    def _pattern_based_relationship_analysis(self, schema):
        """Infer FK relationships from column-name patterns.

        e.g. ``user_id`` is linked to table ``users``' primary key, and a
        bare ``id`` column is linked to other tables whose PK is literally
        ``id``. (The original built a ``primary_key_map`` that was never
        used; it has been removed.)
        """
        for table_name, table_info in schema["tables"].items():
            for column in table_info["columns"]:
                if column["is_primary"]:
                    continue

                # (regex, index of the capture group holding the referenced
                #  table's base name); the bare-id pattern is special-cased.
                patterns = [
                    (r'^(.+?)(_id|Id|ID)$', 1),
                    (r'^(.+?)(_编号|_代码|编号|代码)$', 1),
                    (r'^关联(.+?)$', 1),
                    (r'^related_(.+?)$', 1),
                    (r'^id$', 0),
                ]

                for pattern, group_idx in patterns:
                    match = re.match(pattern, column["name"], re.IGNORECASE)
                    if not match:
                        continue

                    if pattern == r'^id$':
                        # A bare "id" column: link it to every other table
                        # whose primary key is literally "id".
                        for target_table, target_info in schema["tables"].items():
                            if target_table == table_name:
                                continue
                            if "id" in target_info["primary_keys"]:
                                self._add_foreign_key_relationship(
                                    schema, table_name, column["name"], 
                                    target_table, "id", "high", 
                                    f"纯id列引用 {target_table}.id",
                                    "pattern_based"
                                )
                    else:
                        extracted_name = match.group(group_idx)
                        # Link to any table whose name matches the extracted base.
                        for target_table in schema["tables"]:
                            if self._is_table_match(extracted_name, target_table):
                                target_pk = self._find_primary_key(schema["tables"][target_table])
                                if target_pk:
                                    self._add_foreign_key_relationship(
                                        schema, table_name, column["name"],
                                        target_table, target_pk, "high",
                                        f"列名模式匹配: {column['name']} -> {target_table}.{target_pk}",
                                        "pattern_based"
                                    )
    
    def _semantic_based_relationship_analysis(self, schema):
        """Infer relationships between tables whose names are semantically related."""
        table_names = list(schema["tables"].keys())

        for idx, first_table in enumerate(table_names):
            for second_table in table_names[idx + 1:]:
                if not self._are_tables_related(first_table, second_table):
                    continue
                # The names look related; try to locate the linking columns.
                semantic_link = self._find_semantic_relationship(schema, first_table, second_table)
                if not semantic_link:
                    continue
                first_col, second_col, confidence = semantic_link
                self._add_foreign_key_relationship(
                    schema, first_table, first_col, second_table, second_col, 
                    confidence, f"语义关系: {first_table} <-> {second_table}",
                    "semantic"
                )
    
    def _many_to_many_relationship_analysis(self, schema):
        """Log tables that look like many-to-many junction tables.

        A table holding two or more foreign-key-candidate columns is only
        reported at debug level; no relationship is recorded here.
        """
        for table_name, table_info in schema["tables"].items():
            foreign_key_candidates = [
                column["name"]
                for column in table_info["columns"]
                if self._is_foreign_key_candidate(column["name"])
            ]

            if len(foreign_key_candidates) >= 2:
                logger.debug(f"发现可能的关联表: {table_name}, 外键候选: {foreign_key_candidates}")
    
    def _data_type_based_relationship_analysis(self, schema):
        """Match primary-key columns across tables that share a data type.

        Only int and varchar keys are considered; int matches get medium
        confidence, varchar matches low.
        """
        # (table, normalized type) -> list of that table's PK column names.
        type_column_map = defaultdict(list)
        for table_name, table_info in schema["tables"].items():
            for column in table_info["columns"]:
                if column["is_primary"]:
                    type_column_map[(table_name, self._normalize_data_type(column["type"]))].append(column["name"])

        for (table1, data_type), columns1 in type_column_map.items():
            for (table2, other_type), columns2 in type_column_map.items():
                # Bug fix: the second entry's type was previously discarded,
                # so an int PK could be "matched" against a varchar PK even
                # though this strategy is meant to pair same-typed columns.
                if table1 == table2 or data_type != other_type:
                    continue
                if data_type not in ("int", "varchar"):
                    continue
                for col1 in columns1:
                    for col2 in columns2:
                        if self._are_columns_related(col1, col2, table1, table2):
                            confidence = "medium" if data_type == "int" else "low"
                            self._add_foreign_key_relationship(
                                schema, table1, col1, table2, col2,
                                confidence, f"数据类型匹配: {data_type}",
                                "data_type_based"
                            )
    
    def _same_column_name_relationship_analysis(self, schema):
        """Record every relationship inferred from identical column names."""
        inferred = self.same_column_analyzer.analyze_database(schema)
        for rel in inferred:
            self._add_foreign_key_relationship(
                schema,
                rel["source_table"], rel["source_column"],
                rel["target_table"], rel["target_column"],
                rel["confidence"], rel["description"],
                rel["relationship_type"],
            )
    
    def _multi_column_relationship_analysis(self, schema):
        """Fold multi-column analyzer results into the schema and track stats."""
        relationships = self.multi_column_analyzer.analyze_multi_column_relationships(schema)
        for rel in relationships:
            self._add_foreign_key_relationship(
                schema,
                rel["source_table"],
                rel["source_column"],
                rel["target_table"],
                rel["target_column"],
                rel["confidence"],
                rel["description"],
                rel["relationship_type"],
            )
            # Keep dedicated counters for multi-column inferences.
            self.relationship_stats["multi_column"] += 1
            self.relationship_stats[rel["relationship_type"]] += 1
    
    def _is_table_match(self, extracted_name, table_name):
        """Decide whether an extracted name refers to the given table.

        Matches on equality, substring containment (either direction), or
        membership in a Chinese synonym group shared with the table base name.
        """
        base = self._extract_table_base(table_name).lower()
        candidate = extracted_name.lower()

        # Exact match, or either string contained in the other.
        if base == candidate or candidate in base or base in candidate:
            return True

        # Chinese synonym groups: the candidate matches when it belongs to a
        # group and the table base contains any synonym from that same group.
        synonym_groups = {
            '用户': ['用户', '会员', '客户'],
            '产品': ['产品', '商品', '物品'],
            '订单': ['订单', '单据', '交易'],
            '分类': ['分类', '类别', '类型']
        }
        return any(
            candidate in group and any(word in base for word in group)
            for group in synonym_groups.values()
        )
    
    def _find_primary_key(self, table_info):
        """查找表的主键"""
        if table_info["primary_keys"]:
            return table_info["primary_keys"][0]
        
        # 如果没有明确定义的主键，查找id列
        for column in table_info["columns"]:
            if column["name"].lower() == "id":
                return "id"
        
        return None
    
    def _are_tables_related(self, table1, table2):
        """Heuristically decide whether two tables are related by name.

        Two tables are considered related when their names share a common
        base with a paired suffix (e.g. "订单信息" / "订单详情"), or when
        their base names share at least two characters.

        FIX: the original expressed the suffix pairs as regex tuples where
        the second pattern contained a backreference (r'\\1详情$') but was
        compiled as a standalone pattern — re.match() raises re.error
        ("invalid group reference") whenever the first pattern matched.
        The base-name comparison below implements the intended semantics.
        """
        t1_base = self._extract_table_base(table1).lower()
        t2_base = self._extract_table_base(table2).lower()
        
        # Suffix pairs indicating a master/detail-style relationship when
        # both names share the same (non-empty) base.
        suffix_pairs = [
            ('信息', '详情'),
            ('主表', '明细'),
            ('表', '关系'),
            ('数据', '记录'),
        ]
        
        def base_before(name, suffix):
            # Part of `name` before `suffix`, or None when it doesn't apply.
            if name.endswith(suffix) and len(name) > len(suffix):
                return name[:-len(suffix)]
            return None
        
        for suffix1, suffix2 in suffix_pairs:
            # Check both orderings, mirroring the original's symmetric test.
            b1 = base_before(table1, suffix1)
            if b1 is not None and b1 == base_before(table2, suffix2):
                return True
            b2 = base_before(table2, suffix1)
            if b2 is not None and b2 == base_before(table1, suffix2):
                return True
        
        # Fallback: require at least two shared characters between the base
        # names (chiefly meaningful for Chinese, where a character ≈ a word).
        common_chars = set(t1_base) & set(t2_base)
        return len(common_chars) >= 2
    
    def _find_semantic_relationship(self, schema, table1, table2):
        """Return (col1, col2, "medium") for the first name-related column pair
        with matching normalized data types, or None when no pair qualifies."""
        columns1 = schema["tables"][table1]["columns"]
        columns2 = schema["tables"][table2]["columns"]

        for c1 in columns1:
            for c2 in columns2:
                if not self._are_columns_related(c1["name"], c2["name"], table1, table2):
                    continue
                # Only pair columns whose normalized types agree.
                if self._normalize_data_type(c1["type"]) == self._normalize_data_type(c2["type"]):
                    return c1["name"], c2["name"], "medium"

        return None
    
    def _are_columns_related(self, col1, col2, table1, table2):
        """Heuristically check whether two columns from different tables relate.

        Related when the names are identical (case-insensitive) or when they
        form a "<table-base>id" / "<table-base>_id" style pairing, including a
        plain "id" on one side.
        """
        name1 = col1.lower()
        name2 = col2.lower()

        # Identical names are always considered related.
        if name1 == name2:
            return True

        base1 = self._extract_table_base(table1).lower()
        base2 = self._extract_table_base(table2).lower()

        paired_names = (
            (f"{base1}id", f"{base2}id"),
            (f"{base1}_id", f"{base2}_id"),
            ("id", f"{base2}id"),
            (f"{base1}id", "id"),
        )
        return (name1, name2) in paired_names
    
    def _normalize_data_type(self, data_type):
        """规范化数据类型"""
        data_type = data_type.lower()
        if 'int' in data_type:
            return 'int'
        elif 'char' in data_type or 'text' in data_type:
            return 'varchar'
        elif 'date' in data_type or 'time' in data_type:
            return 'datetime'
        else:
            return 'other'
    
    def _is_foreign_key_candidate(self, column_name):
        """判断是否为外键候选"""
        patterns = [
            r'.*_id$', r'.*Id$', r'.*ID$', r'.*_no$', r'.*_code$',
            r'.*编号$', r'.*代码$', r'.*号$', r'.*标识$', r'^id$'
        ]
        
        return any(re.match(pattern, column_name, re.IGNORECASE) for pattern in patterns)
    
    def _add_foreign_key_relationship(self, schema, source_table, source_column,
                                    target_table, target_column, confidence, description,
                                    relationship_type="general"):
        """Register an inferred foreign key on the source table.

        Skips duplicates (same source column already pointing at the same
        target table) and updates the confidence / relationship-type counters.
        """
        fk_list = schema["tables"][source_table]["foreign_keys"]

        # De-duplicate: one relationship per (source column, target table).
        already_present = any(
            fk["source_column"] == source_column and fk["target_table"] == target_table
            for fk in fk_list
        )
        if already_present:
            return

        fk_list.append({
            "source_column": source_column,
            "target_table": target_table,
            "target_column": target_column,
            "relationship": "REFERENCES",
            "confidence": confidence,
            "description": description,
            "relationship_type": relationship_type
        })
        self.relationship_stats[confidence] += 1
        self.relationship_stats[relationship_type] += 1
    
    def _extract_table_base(self, table_name):
        """从表名中提取基础名称"""
        # 移除常见后缀
        suffixes = ['表', '信息', '数据', '记录', '详情', '列表', '计划', 's', 'es']
        base_name = table_name
        
        for suffix in suffixes:
            if base_name.lower().endswith(suffix.lower()):
                base_name = base_name[:-len(suffix)]
                break
        
        # 移除前缀
        prefixes = ['tbl_', 'tab_', 'table_', 'tb_']
        for prefix in prefixes:
            if base_name.lower().startswith(prefix.lower()):
                base_name = base_name[len(prefix):]
                break
                
        return base_name if base_name != table_name else table_name
    
    def _count_total_columns(self, schema):
        """计算总列数"""
        total = 0
        for table_info in schema["tables"].values():
            total += len(table_info["columns"])
        return total
    
    def build_enhanced_column_level_graph(self, schema):
        """Build the enhanced column-level graph in Neo4j for one database.

        Creates one Database node, a Table node per table, a Column node per
        column, and a REFERENCES edge for every inferred foreign key. Any
        nodes previously created for the same database are deleted first, so
        the call is idempotent per database.

        Args:
            schema: schema dict containing "database" and "tables" keys
                (as produced by extract_enhanced_schema — assumed; confirm
                against caller).

        Returns:
            True on success, False when schema is falsy or a Neo4j error
            occurs (the error is logged, not raised).
        """
        if not schema:
            return False
        
        database_name = schema["database"]
        
        with self.neo4j_driver.session() as session:
            try:
                # Remove any previously built nodes for this database so that
                # reruns do not duplicate the graph.
                session.run("MATCH (n {database: $db_name}) DETACH DELETE n", db_name=database_name)
                
                # Create the Database node with summary counts.
                session.run(
                    """
                    CREATE (db:Database {
                        name: $name, 
                        table_count: $table_count,
                        column_count: $column_count,
                        relationship_count: $relationship_count,
                        processed_at: datetime()
                    })
                    """,
                    name=database_name,
                    table_count=len(schema["tables"]),
                    column_count=self._count_total_columns(schema),
                    relationship_count=sum(len(table_info["foreign_keys"]) for table_info in schema["tables"].values())
                )
                
                # Create Table nodes and their Column nodes.
                for table_name, table_info in schema["tables"].items():
                    # Table node, linked to the Database node.
                    session.run(
                        """
                        MATCH (db:Database {name: $db_name})
                        CREATE (t:Table {
                            name: $table_name,
                            database: $db_name,
                            column_count: $column_count,
                            primary_key_count: $pk_count,
                            foreign_key_count: $fk_count
                        })
                        CREATE (db)-[:CONTAINS_TABLE]->(t)
                        """,
                        db_name=database_name,
                        table_name=table_name,
                        column_count=len(table_info["columns"]),
                        pk_count=len(table_info["primary_keys"]),
                        fk_count=len(table_info["foreign_keys"])
                    )
                    
                    # Column nodes, each linked to its Table node.
                    for column in table_info["columns"]:
                        session.run(
                            """
                            MATCH (t:Table {name: $table_name, database: $db_name})
                            CREATE (c:Column {
                                name: $column_name,
                                table_name: $table_name,
                                database: $db_name,
                                full_name: $full_name,
                                type: $data_type,
                                nullable: $nullable,
                                is_primary: $is_primary,
                                is_unique: $is_unique,
                                is_foreign: $is_foreign,
                                default_value: $default_value
                            })
                            CREATE (t)-[:HAS_COLUMN]->(c)
                            """,
                            db_name=database_name,
                            table_name=table_name,
                            column_name=column["name"],
                            full_name=f"{table_name}.{column['name']}",
                            data_type=column["type"],
                            nullable=column["nullable"],
                            is_primary=column["is_primary"],
                            is_unique=column["is_unique"],
                            is_foreign=column["is_foreign"],
                            # Stringify the default so Neo4j accepts any type.
                            default_value=str(column["default"]) if column["default"] is not None else None
                        )
                
                # Create column-to-column REFERENCES edges for inferred FKs.
                foreign_key_count = 0
                for table_name, table_info in schema["tables"].items():
                    for fk in table_info["foreign_keys"]:
                        try:
                            result = session.run(
                                """
                                MATCH (source_col:Column {
                                    name: $source_col, 
                                    table_name: $source_table, 
                                    database: $db_name
                                })
                                MATCH (target_col:Column {
                                    name: $target_col, 
                                    table_name: $target_table, 
                                    database: $db_name
                                })
                                CREATE (source_col)-[r:REFERENCES {
                                    relationship: $relationship,
                                    confidence: $confidence,
                                    description: $description,
                                    inference_method: $method,
                                    relationship_type: $relationship_type,
                                    created_at: datetime()
                                }]->(target_col)
                                RETURN source_col, target_col
                                """,
                                db_name=database_name,
                                source_col=fk["source_column"],
                                source_table=table_name,
                                target_col=fk["target_column"],
                                target_table=fk["target_table"],
                                relationship=fk["relationship"],
                                confidence=fk["confidence"],
                                description=fk["description"],
                                method="enhanced_inference",
                                relationship_type=fk.get("relationship_type", "general")
                            )
                            
                            # Count the edge only when both endpoints matched.
                            if result.single():
                                foreign_key_count += 1
                                
                        except Exception as e:
                            # A missing endpoint or driver error on one FK
                            # should not abort the rest of the build.
                            logger.warning(f"创建外键关系失败: {table_name}.{fk['source_column']} -> {fk['target_table']}.{fk['target_column']}, 错误: {e}")
                
                logger.info(f"✅ 成功构建数据库 '{database_name}' 的增强列级别图: {len(schema['tables'])} 表, {self._count_total_columns(schema)} 列, {foreign_key_count} 个外键关系")
                return True
                
            except Exception as e:
                logger.error(f"构建数据库 '{database_name}' 的列级别图失败: {e}")
                return False
    
    def process_databases_with_enhanced_analysis(self, sample_size=None):
        """Extract schemas and build enhanced column-level graphs for databases.

        Args:
            sample_size: optional int; when given, only the first
                `sample_size` databases returned by get_all_databases()
                are processed.

        Returns:
            dict with "processed", "total_tables", "total_columns" and
            "total_relationships" totals, or None when there is nothing
            to process.
        """
        databases = self.get_all_databases()
        
        if sample_size:
            databases = databases[:sample_size]
        
        if not databases:
            logger.info("没有需要处理的数据库")
            return
        
        logger.info(f"开始为 {len(databases)} 个数据库构建增强列级别图")
        
        success_count = 0
        total_tables = 0
        total_columns = 0
        total_relationships = 0
        
        # Databases known to fail extraction are skipped outright.
        problematic_databases = ['cre_Doc_Control_Systems']
        
        for i, db_name in enumerate(databases, 1):
            if db_name in problematic_databases:
                logger.info(f"跳过已知有问题的数据库: {db_name}")
                continue
                
            logger.info(f"处理数据库 {i}/{len(databases)}: {db_name}")
            
            try:
                schema = self.extract_enhanced_schema(db_name)
                if schema:
                    # Guard against schemas that came back without any tables.
                    if not schema.get("tables"):
                        logger.warning(f"数据库 '{db_name}' 没有表，跳过")
                        continue
                        
                    success = self.build_enhanced_column_level_graph(schema)
                    if success:
                        success_count += 1
                        total_tables += len(schema["tables"])
                        total_columns += self._count_total_columns(schema)
                        db_relationships = sum(len(table_info["foreign_keys"]) for table_info in schema["tables"].values())
                        total_relationships += db_relationships
                        
                        logger.info(f"数据库 '{db_name}' 关系统计: {db_relationships} 个推断关系")
                    else:
                        logger.error(f"构建数据库 '{db_name}' 的图失败")
                else:
                    logger.error(f"提取数据库 '{db_name}' 结构失败")
            except Exception as e:
                # One failing database must not stop the batch; log and move on.
                logger.error(f"处理数据库 '{db_name}' 时发生错误: {e}")
                import traceback
                logger.debug(traceback.format_exc())
            
            # Progress report every 10 databases.
            if i % 10 == 0:
                logger.info(f"进度: {i}/{len(databases)}，成功: {success_count}，总表: {total_tables}，总列: {total_columns}，总关系: {total_relationships}")
        
        # Dump the accumulated inference statistics.
        logger.info(f"关系推断统计: {dict(self.relationship_stats)}")
        
        logger.info(f"增强列级别图构建完成! 成功处理 {success_count}/{len(databases)} 个数据库")
        return {
            "processed": success_count,
            "total_tables": total_tables,
            "total_columns": total_columns,
            "total_relationships": total_relationships
        }
    
    def generate_enhanced_relationship_report(self):
        """Query Neo4j and print a summary report of the inferred graph.

        Covers overall node/relationship totals, the top databases by
        relationship count, confidence and relationship-type distributions,
        multi-column relationship totals, and databases with no inferred
        relationships. Output goes to stdout.
        """
        with self.neo4j_driver.session() as session:
            # Overall totals across all Database nodes.
            result = session.run("""
                MATCH (db:Database)
                RETURN count(db) as total_databases,
                       sum(db.table_count) as total_tables,
                       sum(db.column_count) as total_columns,
                       sum(db.relationship_count) as total_relationships
            """)
            
            stats = result.single()
            
            # Databases with the most inferred relationships.
            result2 = session.run("""
                MATCH (db:Database)
                RETURN db.name as database_name,
                       db.table_count as table_count,
                       db.column_count as column_count,
                       db.relationship_count as relationship_count
                ORDER BY db.relationship_count DESC
                LIMIT 15
            """)
            
            top_databases = [record.data() for record in result2]
            
            # Distribution of relationship confidence levels.
            result3 = session.run("""
                MATCH (c1:Column)-[r:REFERENCES]->(c2:Column)
                RETURN r.confidence as confidence, count(r) as count
                ORDER BY count DESC
            """)
            
            confidence_stats = [record.data() for record in result3]
            
            # Count of multi-column inferred relationships.
            result4 = session.run("""
                MATCH (c1:Column)-[r:REFERENCES]->(c2:Column)
                WHERE r.inference_method CONTAINS 'multi_column' OR r.relationship_type CONTAINS 'internal_'
                RETURN count(r) as multi_column_relationships
            """)
            
            multi_column_stats = result4.single()
            
            # Distribution of relationship types.
            result5 = session.run("""
                MATCH (c1:Column)-[r:REFERENCES]->(c2:Column)
                WHERE r.relationship_type IS NOT NULL
                RETURN r.relationship_type as type, count(r) as count
                ORDER BY count DESC
            """)
            
            relationship_type_stats = [record.data() for record in result5]
            
            print("\n" + "="*80)
            print("📊 增强列级别图分析报告 (多列关系)")
            print("="*80)
            print(f"📁 总数据库数: {stats['total_databases']}")
            print(f"📋 总表数: {stats['total_tables']}")
            print(f"🔢 总列数: {stats['total_columns']}")
            print(f"🔗 总推断关系: {stats['total_relationships']}")
            
            if stats['total_tables'] > 0:
                relationship_density = stats['total_relationships'] / stats['total_tables']
                print(f"📈 关系密度: {relationship_density:.2f} 关系/表")
            
            if multi_column_stats and multi_column_stats['multi_column_relationships'] > 0:
                print(f"🔗 多列推断关系: {multi_column_stats['multi_column_relationships']} 个")
            
            print("\n🏆 关系最丰富的数据库 (Top 15):")
            for i, db in enumerate(top_databases, 1):
                if db['relationship_count'] > 0:
                    table_ratio = db['relationship_count'] / db['table_count'] if db['table_count'] > 0 else 0
                    print(f"  {i:2d}. {db['database_name']:25} {db['table_count']:3}表/{db['relationship_count']:3}关系 (密度: {table_ratio:.1f})")
            
            print("\n🎯 关系置信度分布:")
            for stat in confidence_stats:
                print(f"  {stat['confidence']:10}: {stat['count']:4} 个关系")
            
            if relationship_type_stats:
                print("\n🔧 关系类型分布:")
                for stat in relationship_type_stats:
                    print(f"  {stat['type']:15}: {stat['count']:4} 个关系")
            
            # Databases for which no relationship could be inferred.
            result6 = session.run("""
                MATCH (db:Database)
                WHERE db.relationship_count = 0
                RETURN db.name as database_name, db.table_count as table_count
                ORDER BY db.table_count DESC
                LIMIT 10
            """)
            
            no_relationship_dbs = [record.data() for record in result6]
            
            if no_relationship_dbs:
                print(f"\n⚠️  没有推断出关系的数据库 (Top 10):")
                for i, db in enumerate(no_relationship_dbs, 1):
                    print(f"  {i:2d}. {db['database_name']:25} {db['table_count']:3}张表")

def main():
    """Interactive entry point: connect, choose a processing scope, build the
    graphs, and print a summary report."""
    print("=== 增强列级别数据库图构建器 (多列关系版) ===")
    print("使用多种策略推断表间关系，包括多列关系分析")
    
    # Prompt for credentials without echoing them.
    mysql_password = getpass.getpass("请输入MySQL root密码: ")
    neo4j_password = getpass.getpass("请输入Neo4j密码: ")
    
    # Connection settings for the source MySQL server and the target Neo4j.
    mysql_config = {
        "host": "192.168.0.162",
        "user": "root",
        "password": mysql_password,
        "port": 3310,
        "connection_timeout": 10
    }
    
    neo4j_config = {
        "uri": "bolt://192.168.0.162:7687",
        "user": "neo4j",
        "password": neo4j_password
    }
    
    builder = EnhancedColumnLevelGraphBuilder(mysql_config, neo4j_config)
    
    try:
        # Probe MySQL connectivity before doing any work. FIX: always close
        # the probe connection — the original only closed it when
        # is_connected() returned True, leaking it otherwise.
        print("测试数据库连接...")
        test_conn = mysql.connector.connect(**mysql_config)
        try:
            if test_conn.is_connected():
                print("✅ MySQL连接成功")
        finally:
            test_conn.close()
        
        # Let the user pick the processing scope.
        print("\n选择处理范围:")
        print("1. 处理所有数据库")
        print("2. 处理样本数据库（前20个）")
        print("3. 处理特定数据库")
        
        choice = input("请选择 (1/2/3, 默认2): ").strip() or "2"
        
        if choice == "1":
            print("处理所有数据库...")
            result = builder.process_databases_with_enhanced_analysis()
        elif choice == "3":
            db_name = input("请输入数据库名称: ").strip()
            print(f"处理特定数据库: {db_name}")
            schema = builder.extract_enhanced_schema(db_name)
            if schema:
                builder.build_enhanced_column_level_graph(schema)
                result = {"processed": 1, "total_tables": len(schema["tables"]),
                          "total_columns": builder._count_total_columns(schema),
                          "total_relationships": sum(len(table_info["foreign_keys"]) for table_info in schema["tables"].values())}
            else:
                result = {"processed": 0, "total_tables": 0, "total_columns": 0, "total_relationships": 0}
        else:
            print("处理前20个数据库作为样本...")
            result = builder.process_databases_with_enhanced_analysis(sample_size=20)
        
        # Print the detailed relationship report.
        builder.generate_enhanced_relationship_report()
        
        if result:
            print(f"\n✅ 处理完成! 成功处理 {result['processed']} 个数据库")
            print(f"📋 总表数: {result['total_tables']}")
            print(f"🔢 总列数: {result['total_columns']}")
            print(f"🔗 总推断关系: {result['total_relationships']}")
            
            if result['total_tables'] > 0:
                density = result['total_relationships'] / result['total_tables']
                print(f"📈 平均关系密度: {density:.2f} 关系/表")
        
        print("🌐 访问 Neo4j Browser: http://192.168.0.162:7474/browser/")
        print("📋 详细日志请查看: multi_column_relationship_builder.log")
        
    except Exception as e:
        logger.error(f"程序执行失败: {e}")
        print(f"❌ 错误: {e}")
    finally:
        # Release MySQL / Neo4j resources regardless of how we exit.
        builder.close()

if __name__ == "__main__":
    main()