import os
import json
import re
import traceback
from collections import defaultdict
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, List, Tuple, Dict, Any
import threading
import numpy as np

# ===== Dependencies =====
try:
    import faiss  # pip install faiss-cpu
except Exception as e:
    raise RuntimeError("请先安装 faiss-cpu: pip install faiss-cpu") from e

# Environment-variable configuration: embedding-backend selection and API keys.
# Set USE_OPENAI_EMBEDDING=1 / USE_DEEPSEEK_EMBEDDING=1 to opt into a hosted
# backend; otherwise a local sentence-transformers model is used.
USE_OPENAI_EMBEDDING = os.getenv("USE_OPENAI_EMBEDDING", "0") == "1"
USE_DEEPSEEK_EMBEDDING = os.getenv("USE_DEEPSEEK_EMBEDDING", "0") == "1"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "")

# On-disk JSON cache of previously inferred foreign keys.
CACHE_FILE = "fk_cache.json"


# =========================
# Utility functions
# =========================

def _stem(s: str) -> str:
    return re.sub(r"[^a-z0-9]", "", s.lower())


def _table_tokens(name: str) -> list:
    if not name:
        return []
    s = re.sub(r"[^a-z0-9_]", "_", name.lower())
    tokens = [t for t in re.split(r"[_\W]+", s) if t]
    prefixes = {"tbl", "t", "sys", "dim", "fact", "cif", "mst", "ref", "stg", "refdata"}
    tokens = [t for t in tokens if t not in prefixes]
    return tokens


def _normalize_table_name(name: str) -> str:
    """Reduce a table name to its most significant (last) token."""
    tokens = _table_tokens(name)
    return tokens[-1] if tokens else _stem(name)


def _is_numeric(dtype: str) -> bool:
    if not dtype:
        return False
    dl = dtype.lower()
    return any(
        k in dl for k in ["int", "decimal", "number", "bigint", "smallint", "float", "double", "real"]) and not any(
        k in dl for k in ["date", "time"])


def _is_string(dtype: str) -> bool:
    if not dtype:
        return False
    dl = dtype.lower()
    return any(k in dl for k in ["char", "text", "string", "varchar", "nvarchar"])


def _is_datetime(dtype: str) -> bool:
    if not dtype:
        return False
    dl = dtype.lower()
    return any(k in dl for k in ["date", "time", "timestamp"])


def _name_bonus(child_col: str, parent_table: str, parent_col: str) -> float:
    """Heuristic naming bonus for a candidate FK child_col -> parent_table.parent_col.

    Rewards typical FK naming patterns (e.g. ``customer_id`` pointing at a
    ``customer`` table) plus weak containment signals. Returns a value in
    [0.0, 0.95].
    """
    c = _stem(child_col)
    pt = _normalize_table_name(parent_table)
    pc = _stem(parent_col)

    bonus = 0.0
    # Typical FK suffixes. Strip ONLY the trailing suffix: the previous
    # str.replace-based stripping also deleted interior occurrences
    # (e.g. "biditemid".replace("id", "") -> "btem"), corrupting the base.
    for suffix, weight in (("id", 0.55), ("no", 0.55), ("code", 0.55), ("type", 0.45)):
        if c.endswith(suffix):
            base = c[: -len(suffix)]
            if pt in c or base == pt or pt.endswith(base):
                bonus += weight
    # Weaker signal: child name contains the parent table / parent column name.
    if pt in c:
        bonus += 0.2
    if pc in c:
        bonus += 0.2
    return min(bonus, 0.95)


def _type_bonus(child_dtype: str, parent_dtype: str) -> float:
    """Small score adjustment based on dtype-family compatibility."""
    if not child_dtype or not parent_dtype:
        return 0.0
    pair = (child_dtype, parent_dtype)
    # Date/time columns rarely participate in FKs here: penalize.
    if any(_is_datetime(d) for d in pair):
        return -0.2
    if all(_is_numeric(d) for d in pair):
        return 0.35
    if all(_is_string(d) for d in pair):
        return 0.25
    # Mixed or unknown families get a mild penalty.
    return -0.05


def _value_overlap_bonus(child_vals: List[Any], parent_vals: List[Any]) -> float:
    if not child_vals or not parent_vals:
        return 0.0
    cv = {str(v) for v in child_vals if v is not None and str(v) != ""}
    pv = {str(v) for v in parent_vals if v is not None and str(v) != ""}
    if not cv or not pv:
        return 0.0
    inter = len(cv & pv)
    if inter == 0:
        return 0.0
    ratio = inter / max(1, len(cv))
    return max(0.0, min(0.6, ratio * 0.6))


# =========================
# SQL parser - improved version
# =========================
class SQLTableParser:
    """Regex-based parser for CREATE TABLE statements in raw SQL text."""

    # Keywords that open table-level constraint clauses. The column regex
    # would otherwise misread e.g. "PRIMARY KEY (`id`)" as a column named
    # "PRIMARY" of type "KEY" and emit a bogus column.
    _CONSTRAINT_KEYWORDS = {
        "primary", "key", "unique", "constraint", "foreign",
        "index", "check", "fulltext", "spatial",
    }

    def __init__(self):
        self.tables = {}

    def parse_create_table(self, sql_content: str) -> List[Dict]:
        """Parse every CREATE TABLE statement in *sql_content*.

        Returns a list of ``{'name': str, 'columns': [...]}`` dicts; each
        column dict has ``name``, ``type``, ``comment`` and ``is_primary``.
        If no explicit PRIMARY KEY clause exists, an id-like column is
        guessed as the primary key.
        """
        tables = []

        # Statement/column grammars (MySQL-flavoured, best effort).
        create_pattern = r'CREATE\s+TABLE\s+(?:IF NOT EXISTS\s+)?`?(\w+)`?\s*\((.*?)\)\s*(?:ENGINE.*?)?;'
        column_pattern = r'`?(\w+)`?\s+(\w+(?:\(\d+(?:,\d+)?\))?)(?:\s+(?:UNSIGNED|ZEROFILL))?\s*(?:CHARACTER SET\s+\w+)?\s*(?:COLLATE\s+\w+)?\s*(?:NOT NULL|NULL)?\s*(?:DEFAULT\s+(?:[^,]+|\([^)]+\)))?\s*(?:AUTO_INCREMENT)?\s*(?:COMMENT\s*\'([^\']*)\')?'

        for match in re.finditer(create_pattern, sql_content, re.DOTALL | re.IGNORECASE):
            table_name = match.group(1)
            table_content = match.group(2)

            # The PRIMARY KEY clause is table-level: parse it once per table
            # (previously re-searched for every column).
            primary_keys = []
            pk_match = re.search(r'PRIMARY\s+KEY\s*\(([^)]+)\)', table_content, re.IGNORECASE)
            if pk_match:
                primary_keys = [pk.strip().strip('`') for pk in pk_match.group(1).split(',')]

            columns = []
            for col_match in re.finditer(column_pattern, table_content, re.DOTALL | re.IGNORECASE):
                col_name = col_match.group(1)
                # Skip constraint clauses that the column regex matched.
                if col_name.lower() in self._CONSTRAINT_KEYWORDS:
                    continue
                columns.append({
                    'name': col_name,
                    'type': col_match.group(2),
                    'comment': col_match.group(3) or "",
                    'is_primary': col_name in primary_keys,
                })

            # No explicit PK found: guess one from id-like column names.
            if not any(col['is_primary'] for col in columns):
                for col in columns:
                    low = col['name'].lower()
                    if low == 'id' or low.endswith('_id') or low == table_name.lower() + 'id':
                        col['is_primary'] = True
                        break

            tables.append({
                'name': table_name,
                'columns': columns
            })

        return tables


# =========================
# Embedding backend
# =========================
class EmbeddingBackend:
    """Text-embedding provider with an in-memory, lock-protected cache.

    The backend is chosen once at construction time from environment
    variables: "openai", "deepseek" (OpenAI-compatible endpoint) or the
    default "local" sentence-transformers model. Returned vectors are
    L2-normalized, and each distinct input string is embedded at most once.
    """

    def __init__(self):
        # Backend selection: env flags win, local model is the fallback.
        if USE_OPENAI_EMBEDDING and OPENAI_API_KEY:
            self.model_name = "openai"
        elif USE_DEEPSEEK_EMBEDDING and DEEPSEEK_API_KEY:
            self.model_name = "deepseek"
        else:
            self.model_name = "local"

        self._model = None   # lazily loaded sentence-transformers model
        self._client = None  # lazily created OpenAI-compatible API client
        self.dim = None      # embedding dimensionality, set on first use
        self.mem_cache: Dict[str, List[float]] = {}  # text -> embedding vector
        self._lock = threading.Lock()  # guards cache access and lazy init

    def _ensure_model(self):
        """Lazily initialize the selected backend (local model or API client)."""
        if self.model_name == "local":
            if self._model is None:
                try:
                    from sentence_transformers import SentenceTransformer
                    self._model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
                    self.dim = 384  # fixed output size of all-MiniLM-L6-v2
                except Exception as e:
                    raise RuntimeError(
                        "缺少本地嵌入模型依赖，请先 `pip install sentence-transformers` 或使用其他嵌入服务"
                    ) from e
        elif self.model_name == "openai":
            if self._client is None:
                try:
                    from openai import OpenAI
                    self._client = OpenAI(api_key=OPENAI_API_KEY)
                    self.dim = 3072  # output size of text-embedding-3-large
                except Exception as e:
                    raise RuntimeError("初始化 OpenAI 客户端失败，请检查 OPENAI_API_KEY") from e
        elif self.model_name == "deepseek":
            if self._client is None:
                try:
                    from openai import OpenAI
                    self._client = OpenAI(api_key=DEEPSEEK_API_KEY, base_url="https://api.deepseek.com/v1")
                    # NOTE(review): dim=1024 and the "embed-embedding-v1" model id
                    # used in embed() could not be verified against DeepSeek's
                    # published API — confirm before relying on this backend.
                    self.dim = 1024
                except Exception as e:
                    raise RuntimeError("初始化 DeepSeek 客户端失败，请检查 DEEPSEEK_API_KEY") from e

    def embed(self, texts: List[str]) -> np.ndarray:
        """Embed *texts*, returning a float32 array of shape (len(texts), dim).

        Cache hits are reused; only misses are sent to the backend. Note:
        an empty input returns a 1-D empty array, and the lock is held for
        the whole call, so concurrent embeds are fully serialized.
        """
        if not texts:
            return np.array([], dtype=np.float32)

        with self._lock:
            uncached = []
            idx_map = []  # NOTE(review): collected but never read afterwards
            vecs = []
            # First pass: take cache hits, record misses in input order.
            for i, t in enumerate(texts):
                if t in self.mem_cache:
                    vecs.append(self.mem_cache[t])
                else:
                    vecs.append(None)
                    uncached.append(t)
                    idx_map.append(i)

            if uncached:
                self._ensure_model()
                if self.model_name == "local":
                    # sentence-transformers normalizes the vectors for us.
                    arr = self._model.encode(uncached, normalize_embeddings=True)
                    for i, t in enumerate(uncached):
                        self.mem_cache[t] = arr[i].tolist()
                else:
                    model_name = "text-embedding-3-large" if self.model_name == "openai" else "embed-embedding-v1"
                    batch_size = 100  # stay below per-request input limits
                    all_embeddings = []
                    for i in range(0, len(uncached), batch_size):
                        batch = uncached[i:i + batch_size]
                        resp = self._client.embeddings.create(model=model_name, input=batch)
                        batch_embeddings = np.array([d.embedding for d in resp.data], dtype=np.float32)
                        # L2-normalize so inner product equals cosine similarity.
                        norms = np.linalg.norm(batch_embeddings, axis=1, keepdims=True) + 1e-12
                        batch_embeddings = batch_embeddings / norms
                        all_embeddings.append(batch_embeddings)
                    arr = np.vstack(all_embeddings)
                    for i, t in enumerate(uncached):
                        self.mem_cache[t] = arr[i].tolist()

            # Second pass: fill placeholders from the now-warm cache, consuming
            # the recorded misses in the same order they were collected.
            p = 0
            for j, v in enumerate(vecs):
                if v is None:
                    vecs[j] = self.mem_cache[uncached[p]]
                    p += 1

            # Learn the dimensionality from the first vector if still unknown.
            if self.dim is None and vecs:
                self.dim = len(vecs[0])

            return np.array(vecs, dtype=np.float32)


# =========================
# Vector index
# =========================
class VectorIndex:
    """Thin wrapper around a FAISS inner-product index plus parallel metadata.

    Vectors are L2-normalized on both add and search, so the inner product
    behaves as cosine similarity. Optionally persists the index and its
    metadata to ``persist_dir`` under ``name``.
    """

    def __init__(self, dim: int, persist_dir: Optional[str] = None, name: Optional[str] = None, persist: bool = False):
        self.dim = dim
        self.index = faiss.IndexFlatIP(dim)
        self.metadata: List[Dict[str, Any]] = []

        self.name = name
        self.persist_dir = os.path.abspath(persist_dir) if persist_dir else None
        # Persistence requires all three: the flag, a directory and a name.
        self.persist = bool(persist and persist_dir and name)
        if not self.persist:
            return
        os.makedirs(self.persist_dir, exist_ok=True)
        self.index_path = os.path.join(self.persist_dir, f"{name}.faiss")
        self.meta_path = os.path.join(self.persist_dir, f"{name}.meta.json")
        if os.path.exists(self.index_path) and os.path.exists(self.meta_path):
            try:
                self._load()
            except Exception:
                # Corrupt or incompatible persisted state: start fresh.
                self.index = faiss.IndexFlatIP(dim)
                self.metadata = []

    def add(self, vectors: np.ndarray, metas: List[Dict[str, Any]]):
        """Add L2-normalized copies of *vectors* with their parallel *metas*."""
        if len(vectors) == 0:
            return
        normed = np.asarray(vectors, dtype=np.float32)
        normed = normed / (np.linalg.norm(normed, axis=1, keepdims=True) + 1e-12)
        self.index.add(normed)
        self.metadata.extend(metas)

    def search(self, vectors: np.ndarray, topk: int = 5) -> Tuple[np.ndarray, List[List[Dict[str, Any]]]]:
        """Return (scores, metadata rows) for the top-k matches of each query.

        Missing hits (faiss returns index -1) map to ``None`` metadata.
        """
        if len(vectors) == 0:
            return np.array([]), []
        queries = np.asarray(vectors, dtype=np.float32)
        queries = queries / (np.linalg.norm(queries, axis=1, keepdims=True) + 1e-12)
        if self.index.ntotal == 0:
            placeholders = [[None] * topk for _ in range(len(queries))]
            return np.zeros((len(queries), topk)), placeholders
        D, I = self.index.search(queries, topk)
        meta_rows = [
            [self.metadata[idx] if 0 <= idx < len(self.metadata) else None for idx in row]
            for row in I
        ]
        return D, meta_rows

    def save(self):
        """Persist index and metadata to disk when persistence is enabled."""
        if not self.persist:
            return
        os.makedirs(self.persist_dir, exist_ok=True)
        serialized = faiss.serialize_index(self.index)
        with open(self.index_path, "wb") as fh:
            fh.write(serialized)
        with open(self.meta_path, "w", encoding="utf-8") as fh:
            json.dump(self.metadata, fh, ensure_ascii=False, indent=2)

    def _load(self):
        """Load persisted index + metadata; raise on dimension mismatch."""
        if not self.persist:
            return
        with open(self.index_path, "rb") as fh:
            self.index = faiss.deserialize_index(fh.read())
        with open(self.meta_path, "r", encoding="utf-8") as fh:
            self.metadata = json.load(fh)
        if self.index.d != self.dim:
            raise ValueError(f"索引维度不匹配：期望 {self.dim}，实际 {self.index.d}")


# =========================
# Main class
# =========================
class TableRecovery(SQLTableParser):
    def __init__(self, ddl_dir="schema", output_dir="recovered", recover_name=False, with_data=False,
                 persist_vdb=False, w_semantic=0.25, w_name=0.45, w_type=0.20, w_desc=0.10):
        """Configure directories, scoring weights and per-thread state.

        Args:
            ddl_dir: directory containing the input ``.sql`` DDL files.
            output_dir: directory where recovered outputs are written.
            recover_name: run the simple name-recovery pass when True.
            with_data: flag stored as-is (consumed elsewhere).
            persist_vdb: persist FAISS indexes under ``<output_dir>/.vdb``.
            w_semantic / w_name / w_type / w_desc: scoring weights,
                expected to sum to 1.
        """
        super().__init__()

        # I/O configuration.
        self.ddl_dir = ddl_dir
        self.output_dir = output_dir
        self.recover_name = recover_name
        self.with_data = with_data

        # Matching behaviour and per-thread working state.
        self.sample_threshold = 5
        self.fk_mode = "loose"
        self.thread_local = threading.local()

        # Best-effort load of the persisted FK cache; fall back to empty.
        cached = {}
        if os.path.exists(CACHE_FILE):
            try:
                with open(CACHE_FILE, "r", encoding="utf-8") as cache_fh:
                    cached = json.load(cache_fh)
            except Exception:
                cached = {}
        self.fk_cache = cached

        # Embedding backend and (lazily built) vector indexes.
        self.embedder = EmbeddingBackend()
        self._parent_index: Optional[VectorIndex] = None
        self._child_index: Optional[VectorIndex] = None
        self._vdb_lock = threading.Lock()
        self.persist_vdb = persist_vdb

        # Feature weights (should sum to 1).
        self.w_semantic = w_semantic  # semantic similarity
        self.w_name = w_name  # name match (highest weight)
        self.w_type = w_type  # type compatibility
        self.w_desc = w_desc  # comment/description similarity

        # Retrieval depth and acceptance threshold.
        self.topk = 3
        self.min_score = 0.65

    # ----------------- Helper functions -----------------
    def safe_json_loads(self, text: str) -> Optional[dict]:
        """Parse JSON leniently: try the whole string, then the outermost {...} span.

        Returns ``None`` when nothing parseable is found.
        """
        if not text:
            return None
        stripped = text.strip()
        try:
            return json.loads(stripped)
        except Exception:
            pass
        # Fall back to the substring between the first "{" and the last "}".
        start = stripped.find("{")
        end = stripped.rfind("}")
        if start == -1 or end <= start:
            return None
        try:
            return json.loads(stripped[start:end + 1])
        except Exception:
            return None

    # ----------------- Additional helper functions -----------------
    def _name_match_score(self, child_col: str, parent_table: str, parent_col: str) -> float:
        """Score how strongly the child column NAME points at parent_table.parent_col.

        Scale: 1.0 exact match; 0.8 generic-"id" parent with table-prefixed
        child; 0.65/0.7 containment of the table/column name; up to 0.6 for
        token overlap; otherwise 0.0.
        """
        if not child_col or not parent_col:
            return 0.0
        child = _stem(child_col)
        parent = _stem(parent_col)
        table = _normalize_table_name(parent_table)

        if child == parent:
            return 1.0

        # Parent PK is a generic "id": reward "<table>id"/"<table>_id"-style children.
        if parent == "id" and (
            child == f"{table}id"
            or child.endswith(f"{table}id")
            or child == f"{table}_id"
            or child.endswith(f"{table}_id")
            or child.startswith(table)
        ):
            return 0.8

        # Child name contains the parent table name or parent column stem.
        if table and table in child:
            return 0.65
        if parent and parent in child:
            return 0.7

        # Partial token overlap, scored by coverage of the parent's tokens.
        child_tokens = set(re.split(r"[_\W]+", child))
        parent_tokens = set(re.split(r"[_\W]+", parent))
        shared = child_tokens & parent_tokens
        if shared:
            return min(0.6, 0.6 * len(shared) / max(1, len(parent_tokens)))

        return 0.0

    def _type_compatibility_score(self, child_dtype: str, parent_dtype: str) -> float:
        """Score dtype compatibility of a candidate FK pair.

        Returns:
            -1.0 clearly incompatible (caller should skip the candidate);
             1.0 both numeric or both date/time; 0.9 both string;
             0.35 undecidable families; 0.0 when a dtype is missing.
        """
        if not child_dtype or not parent_dtype:
            return 0.0

        c_num, p_num = _is_numeric(child_dtype), _is_numeric(parent_dtype)
        c_str, p_str = _is_string(child_dtype), _is_string(parent_dtype)
        c_dt, p_dt = _is_datetime(child_dtype), _is_datetime(parent_dtype)

        # A date/time column only pairs with another date/time column.
        if c_dt != p_dt:
            return -1.0

        # numeric vs string is a hard mismatch.
        if (c_num and p_str) or (c_str and p_num):
            return -1.0

        # Same family.
        if c_num and p_num:
            return 1.0
        if c_str and p_str:
            return 0.9
        if c_dt and p_dt:
            return 1.0

        # Anything else is uncertain: neutral score.
        return 0.35

    def _desc_similarity(self, child_comment: str, parent_comment: str) -> float:
        """Comment similarity: 1.0 equal, 0.7 containment, else token overlap up to 0.6."""
        if not child_comment or not parent_comment:
            return 0.0
        left = re.sub(r"\s+", " ", child_comment).strip().lower()
        right = re.sub(r"\s+", " ", parent_comment).strip().lower()
        if left == right:
            return 1.0
        if left in right or right in left:
            return 0.7
        left_tokens = set(re.split(r"[\s,_\W]+", left))
        right_tokens = set(re.split(r"[\s,_\W]+", right))
        common = left_tokens & right_tokens
        if not common:
            return 0.0
        # Normalize by the smaller token set so short comments are not penalized.
        denom = max(1, min(len(left_tokens), len(right_tokens)))
        return min(0.6, 0.6 * len(common) / denom)

    def _calculate_weighted_score(self, sim, name_score, type_score, desc_score, child_col, parent_col, is_pk):
        """Combine the feature scores (weights sum to 1) and apply confidence boosts.

        Returns the boosted score capped at 1.0.
        """
        base = (self.w_semantic * sim
                + self.w_name * name_score
                + self.w_type * type_score
                + self.w_desc * desc_score)

        boost = 1.0
        if name_score > 0.7:  # strong naming evidence
            boost *= 1.2
        if type_score == 1.0:  # exact type-family match
            boost *= 1.1
        # Both sides look like surrogate keys ("...id").
        if _stem(child_col).endswith("id") and _stem(parent_col).endswith("id"):
            boost *= 1.1
        # In loose mode, penalize parents that are not primary keys.
        if self.fk_mode == "loose" and not is_pk:
            boost *= 0.8

        return min(base * boost, 1.0)

    def _get_feature_weights(self, child_col, parent_col):
        """Return per-candidate (semantic, name, type, desc) weights, renormalized to sum 1."""
        weights = {
            "semantic": self.w_semantic,
            "name": self.w_name,
            "type": self.w_type,
            "desc": self.w_desc,
        }

        # ID-like columns: the name carries the strongest signal.
        if _stem(child_col).endswith("id") or _stem(parent_col).endswith("id"):
            weights["name"] += 0.1
            weights["semantic"] -= 0.05
            weights["type"] -= 0.03
            weights["desc"] -= 0.02

        # NOTE(review): parent_col is a bare column name and normally contains
        # no ".", so this branch looks effectively dead — confirm the intended
        # input format (was a "table.column" string expected here?).
        table_name = _normalize_table_name(parent_col.split(".")[0] if "." in parent_col else "")
        if table_name and table_name in _stem(child_col):
            weights["name"] += 0.05
            weights["semantic"] -= 0.05

        # Renormalize so the weights always sum to 1.
        total = sum(weights.values())
        return (weights["semantic"] / total, weights["name"] / total,
                weights["type"] / total, weights["desc"] / total)

    # ----------------- Entry point -----------------
    def __call__(self):
        """Process every ``.sql`` file in ``self.ddl_dir`` with a thread pool.

        Per file: parse CREATE TABLE statements, optionally recover names,
        infer foreign keys (vector-based with a heuristic fallback) and write
        the outputs. All mutable per-file state lives in ``self.thread_local``
        so worker threads do not interfere with each other.
        """
        print("📥 Processing SQL files (multi-threaded)...")
        files = os.listdir(self.ddl_dir)
        sql_files = [f for f in files if f.endswith(".sql")]
        total_files = len(sql_files)

        def process_file(fname):
            # Runs on a worker thread; reset this thread's per-file state first.
            try:
                self.thread_local.anon_tables = {}
                self.thread_local.data = defaultdict(list)
                self.thread_local.recovered_table_map = {}
                self.thread_local.recovered_col_map = {}

                # Defensive re-check; sql_files is already pre-filtered above.
                if not fname.endswith(".sql"):
                    return

                file_path = os.path.join(self.ddl_dir, fname)

                with open(file_path, "r", encoding="utf-8") as f:
                    sql_content = f.read()

                tables = self.parse_create_table(sql_content)
                for table in tables:
                    self.thread_local.anon_tables[table['name']] = table['columns']

                self._init_recovered_map()
                if self.recover_name:
                    self._recover_names()

                # Use a per-file index name so files don't share parent indexes.
                base = os.path.splitext(fname)[0]
                fks = self._infer_foreign_keys_vdb(sql_content, tables, index_name=f"parent_index_{_stem(base)}")

                # Fall back to the name-based heuristic when vectors found nothing.
                if not fks:
                    fks = self._find_potential_fks_heuristic()

                print(f"发现 {len(fks)} 个外键关系")
                self._write_outputs(fks, sql_content, fname)
            except Exception as e:
                # Keep the pool alive: report the failure and move on.
                print(f"❌ 处理文件失败: {fname}\n{e}\n{traceback.format_exc()}")

        with ThreadPoolExecutor(max_workers=min(8, max(1, os.cpu_count() or 4))) as executor:
            # list() drains the iterator so tqdm can show progress as tasks finish.
            list(tqdm(executor.map(process_file, sql_files), total=total_files, desc="Processing DDL files"))
        print(f"\n✅ All outputs saved in: {self.output_dir}")

    def _init_recovered_map(self):
        """Seed the recovery maps with identity mappings (original -> original)."""
        for table_name, columns in self.thread_local.anon_tables.items():
            self.thread_local.recovered_table_map[table_name] = table_name
            for column in columns:
                key = (table_name, column['name'])
                self.thread_local.recovered_col_map[key] = column['name']

    def _recover_names(self):
        """Simple name-recovery pass (extend as needed).

        Currently expands a trailing ``_no`` suffix to ``_number``. Only the
        suffix is rewritten: the previous ``str.replace('_no', '_number')``
        also rewrote interior occurrences (e.g. ``ref_no_x``) and, being
        case-sensitive, missed upper-case suffixes that the case-insensitive
        ``endswith`` check accepts.
        """
        for anon_table, anon_cols in self.thread_local.anon_tables.items():
            for col in anon_cols:
                col_name = col['name']
                if col_name.lower().endswith('_no'):
                    new_name = col_name[:-3] + '_number'
                    self.thread_local.recovered_col_map[(anon_table, col_name)] = new_name

    # ----------------- FK inference via vectors + rules -----------------
    def _infer_foreign_keys_vdb(self, sql_content: str, tables, index_name: str) -> List[Tuple[str, str, str, str]]:
        """Infer FK relations by combining vector retrieval with naming/type rules.

        Pipeline:
          1. Build a table -> primary-key map (with an id-like fallback).
          2. Collect PARENT candidates (PK/unique-looking columns) and CHILD
             candidates (non-PK, non-noise columns) with text descriptions.
          3. Embed the descriptions, index the parents, retrieve top-k per child.
          4. Score each candidate pair (semantic + name + type + comment with
             confidence boosts), keep the best per child, deduplicated per
             (child_table, parent_table) pair and per child column.

        Returns a list of (child_table, child_col, parent_table, parent_col).
        """
        parent_descs, parent_metas = [], []
        child_descs, child_metas = [], []

        table_real_name = lambda t: self.thread_local.recovered_table_map.get(t['name'], t['name'])

        # ---------- table -> primary-key map ----------
        table_pk_map: Dict[str, List[str]] = defaultdict(list)
        for t in tables:
            tname = table_real_name(t)
            for c in t['columns']:
                cname = self.thread_local.recovered_col_map.get((t['name'], c['name']), c['name'])
                if c.get('is_primary', False):
                    table_pk_map[tname].append(cname)
            # Fallback: no explicit PK -> accept id-like column names.
            if not table_pk_map[tname]:
                for c in t['columns']:
                    cname = self.thread_local.recovered_col_map.get((t['name'], c['name']), c['name'])
                    stem = _stem(cname)
                    if stem in {"id", f"{_normalize_table_name(tname)}id"}:
                        table_pk_map[tname].append(cname)

        # ---------- PARENT candidates ----------
        for t in tables:
            tname = table_real_name(t)
            pk_cols = table_pk_map.get(tname, [])
            is_composite_pk = len(pk_cols) > 1  # composite primary key

            for c in t['columns']:
                cname = self.thread_local.recovered_col_map.get((t['name'], c['name']), c['name'])
                dtype = c.get('type', '')
                comment = c.get('comment', '')

                if _is_datetime(dtype):
                    continue
                if _stem(cname) in {"flag", "type", "status"} and is_composite_pk:
                    continue  # drop noisy columns inside composite PKs

                is_parent_like = (
                        cname in pk_cols or
                        _stem(cname) in {"id", f"{_normalize_table_name(tname)}id"} or
                        _stem(cname).endswith("code") or _stem(cname).endswith("no") or _stem(cname).endswith("number")
                )
                if is_parent_like:
                    desc = f"[PARENT] table={tname} column={cname} dtype={dtype} comment={comment} role=primary_or_unique"
                    parent_descs.append(desc)
                    parent_metas.append({
                        "table": tname,
                        "column": cname,
                        "dtype": dtype,
                        "comment": comment,
                        "role": "parent",
                        "is_pk": cname in pk_cols,
                        "is_composite_pk": is_composite_pk
                    })

        # ---------- CHILD candidates ----------
        noise_cols = {"createdat", "updatedat", "timestamp", "status", "type", "name", "desc", "remark", "flag",
                      "isactive", "is_active", "description"}
        for t in tables:
            tname = table_real_name(t)
            pk_cols = set(table_pk_map.get(tname, []))
            for c in t['columns']:
                cname = self.thread_local.recovered_col_map.get((t['name'], c['name']), c['name'])
                dtype = c.get('type', '')
                comment = c.get('comment', '')

                if _is_datetime(dtype):
                    continue
                if cname in pk_cols:
                    continue
                if _stem(cname) in noise_cols:
                    continue

                desc = f"[CHILD] table={tname} column={cname} dtype={dtype} comment={comment} role=child"
                child_descs.append(desc)
                child_metas.append({
                    "table": tname,
                    "column": cname,
                    "dtype": dtype,
                    "comment": comment,
                    "role": "child"
                })

        if not child_descs or not parent_descs:
            return []

        # ---------- embeddings ----------
        parent_vecs = self.embedder.embed(parent_descs)
        child_vecs = self.embedder.embed(child_descs)
        if self.embedder.dim is None:
            return []

        # Build / load the parent index (thread-safe).
        vdb_dir = os.path.join(self.output_dir, ".vdb")
        os.makedirs(vdb_dir, exist_ok=True)
        with self._vdb_lock:
            self._parent_index = VectorIndex(dim=self.embedder.dim, persist_dir=vdb_dir, name=index_name,
                                             persist=self.persist_vdb)
            # Only populate a freshly created (or empty) index.
            if len(self._parent_index.metadata) == 0:
                self._parent_index.add(parent_vecs, parent_metas)
                self._parent_index.save()

        scores, metas = self._parent_index.search(child_vecs, topk=self.topk)

        # ---------- collect and score candidates ----------
        accepted = []
        for i, child_meta in enumerate(child_metas):
            child_table = child_meta["table"]
            child_col = child_meta["column"]
            child_dtype = child_meta.get("dtype", "")
            child_comment = child_meta.get("comment", "")

            cand_scores = scores[i]
            cand_metas = metas[i]

            best_local = (None, -1.0, None)

            for j, pm in enumerate(cand_metas):
                if pm is None:
                    continue
                parent_table = pm["table"]
                parent_col = pm["column"]
                parent_dtype = pm.get("dtype", "")
                parent_comment = pm.get("comment", "")
                is_pk = pm.get("is_pk", False)
                is_composite_pk = pm.get("is_composite_pk", False)

                # Never propose self-references.
                if _normalize_table_name(child_table) == _normalize_table_name(parent_table):
                    continue

                sim = float(cand_scores[j])
                name_score = self._name_match_score(child_col, parent_table, parent_col)
                type_score = self._type_compatibility_score(child_dtype, parent_dtype)
                if type_score < 0:
                    continue  # hard dtype incompatibility
                desc_score = self._desc_similarity(child_comment, parent_comment)

                # Weighted score with confidence adjustments. (A previously
                # computed per-candidate dynamic weighting was dead code —
                # its result was never used — and has been removed.)
                score = self._calculate_weighted_score(sim, name_score, type_score, desc_score,
                                                       child_col, parent_col, is_pk)

                # ---------- extra rules for composite-PK parents ----------
                if is_composite_pk:
                    col_stem = _stem(parent_col)
                    if col_stem in {"flag", "type", "status"}:
                        continue
                    if not (col_stem.endswith("id") or col_stem.endswith("no") or col_stem.endswith("code")):
                        score *= 0.7

                # ---------- re-filter noise columns ----------
                if _stem(child_col) in noise_cols:
                    continue

                if score > best_local[1]:
                    best_local = ((parent_table, parent_col), score, (sim, name_score, type_score, desc_score))

            if best_local[0] is not None and best_local[1] >= self.min_score:
                (pt, pc), sc, _ = best_local
                accepted.append((child_table, child_col, pt, pc, sc))

        # ---------- dedupe: best column pair per (child_table, parent_table) ----------
        best_by_pair: Dict[Tuple[str, str], Tuple[str, str, float]] = {}
        for child_table, child_col, parent_table, parent_col, sc in accepted:
            key = (child_table, parent_table)
            if key not in best_by_pair or sc > best_by_pair[key][2]:
                best_by_pair[key] = (child_col, parent_col, sc)

        # ---------- dedupe: best parent per (child_table, child_col) ----------
        best_by_child: Dict[Tuple[str, str], Tuple[str, str, float]] = {}
        for (child_table, parent_table), (child_col, parent_col, sc) in best_by_pair.items():
            key = (child_table, child_col)
            if key not in best_by_child or sc > best_by_child[key][2]:
                best_by_child[key] = (parent_table, parent_col, sc)

        results = []
        for (child_table, child_col), (parent_table, parent_col, sc) in best_by_child.items():
            if _normalize_table_name(child_table) == _normalize_table_name(parent_table):
                continue
            results.append((child_table, child_col, parent_table, parent_col))

        return results

        # ----------------- Heuristic fallback (kept, slightly improved) -----------------

    def _find_potential_fks_heuristic(self) -> List[Tuple[str, str, str, str]]:
        """
        Heuristic fallback for FK discovery based on naming conventions.

        For every (child, parent) table pair, flags child columns whose names
        look like foreign keys (``*_id`` / ``*_no`` / ``*_code`` / ``*_number``
        suffixes, or containing the parent table's normalized name), then
        matches them against the parent's primary-key candidates by shared
        suffix / stem similarity. Self-references (tables whose normalized
        names coincide) are strictly excluded.

        Returns:
            List of (child_table, child_column, parent_table, parent_column)
            tuples, deduplicated to at most one FK per (child, parent) pair.
        """
        fks = []
        tables_columns: Dict[str, List[str]] = {}
        table_pk_map = defaultdict(list)

        # Build de-anonymized column lists and PK candidates per real table.
        for table, columns in self.thread_local.anon_tables.items():
            real_table = self.thread_local.recovered_table_map.get(table, table)
            tables_columns[real_table] = [
                self.thread_local.recovered_col_map.get((table, col['name']), col['name'])
                for col in columns
            ]

            for col in columns:
                if col.get('is_primary', False):
                    col_name = self.thread_local.recovered_col_map.get((table, col['name']), col['name'])
                    table_pk_map[real_table].append(col_name)

            # No declared PK: fall back to conventional names ("id" / "<table>id").
            if not table_pk_map[real_table]:
                for col in columns:
                    col_name = self.thread_local.recovered_col_map.get((table, col['name']), col['name'])
                    stem = _stem(col_name)
                    if stem == "id" or stem == f"{_normalize_table_name(real_table)}id":
                        table_pk_map[real_table].append(col_name)

        table_names = list(tables_columns.keys())
        for child in table_names:
            for parent in table_names:
                # Strictly avoid self-references, including the same table seen
                # under different prefixes (normalized names compare equal).
                if _normalize_table_name(child) == _normalize_table_name(parent):
                    continue

                parent_pks = table_pk_map.get(parent, [])
                if not parent_pks:
                    continue

                parent_norm = _normalize_table_name(parent)
                for col in tables_columns[child]:
                    col_lower = col.lower()

                    is_fk_like = (
                            col_lower.endswith("_id") or
                            col_lower.endswith("_no") or
                            col_lower.endswith("_code") or
                            col_lower.endswith("_number") or
                            (parent_norm in col_lower and len(parent_norm) > 2)
                    )
                    # Extra pass: the stemmed column may embed the parent name even
                    # when the raw lowercase form does not (underscores stripped).
                    # BUGFIX: guard with len > 2 — mirroring the check above —
                    # otherwise 2-char normalized names ("no", "id", ...) would
                    # falsely flag almost every suffixed column as FK-like.
                    if not is_fk_like and len(parent_norm) > 2 and parent_norm in _stem(col):
                        is_fk_like = True

                    if not is_fk_like:
                        continue

                    # Score each PK candidate by shared suffix and stem identity.
                    best_pk = None
                    best_score = 0.0
                    col_stem = _stem(col)  # invariant for the inner loop
                    for pk_col in parent_pks:
                        pk_stem = _stem(pk_col)
                        score = 0.0
                        # Suffixes are mutually exclusive (distinct last chars),
                        # so at most one of these can fire — same as the original
                        # three separate checks.
                        for suffix in ("id", "no", "code"):
                            if col_stem.endswith(suffix) and pk_stem.endswith(suffix):
                                score += 0.5
                        if parent_norm in col_stem and parent_norm in pk_stem:
                            score += 0.3
                        if col_stem == pk_stem:
                            score += 1.0
                        if score > best_score:
                            best_score = score
                            best_pk = pk_col

                    # NOTE: the former second self-reference check here was dead
                    # code — the pair loop already skips equal normalized names.
                    if best_pk and best_score >= 0.5:
                        fks.append((child, col, parent, best_pk))

        # Keep only the highest-ranked FK per (child, parent) table pair to
        # avoid emitting many parallel edges between the same two tables.
        per_pair: Dict[Tuple[str, str], Tuple[str, str, float]] = {}
        for t1, c1, t2, c2 in fks:
            key = (t1, t2)
            score = 0.5 * _name_bonus(c1, t2, c2)  # approximate ranking via the shared name-bonus scorer
            if key not in per_pair or score > per_pair[key][2]:
                per_pair[key] = (c1, c2, score)
        return [(t1, v[0], t2, v[1]) for (t1, t2), v in per_pair.items()]

    # ----------------- Output -----------------
    def _write_outputs(self, fks, original_sql: str, fname: str):
        """Persist discovered foreign keys.

        Writes two files into ``self.output_dir``:
          * ``<stem>.sql`` — the original SQL with FOREIGN KEY constraints
            injected inline into each matching CREATE TABLE statement;
          * ``<stem>.mmd`` — a Mermaid ER diagram, one edge per FK.

        Args:
            fks: iterable of (child_table, child_col, parent_table, parent_col).
            original_sql: full text of the source SQL dump.
            fname: source file name; its stem names both output files.
        """
        base = os.path.splitext(fname)[0]
        out_path = os.path.join(self.output_dir, f"{base}.sql")
        out_mmd = os.path.join(self.output_dir, f"{base}.mmd")

        # Group FK info by child table for quick lookup during injection.
        fk_map = defaultdict(list)
        for t1, c1, t2, c2 in fks:
            fk_map[t1].append((c1, t2, c2))

        # Locate CREATE TABLE statements and inject foreign keys.
        # NOTE(review): the pattern requires a ") ENGINE=...;" tail, so only
        # MySQL-style dumps are rewritten — confirm this matches the inputs.
        create_pattern = r'(CREATE\s+TABLE\s+`?\w+`?\s*\()(.*?)(\)\s*ENGINE=.*?;)'  # three groups: header, column body, tail

        def inject_fks(match):
            # Append CONSTRAINT ... FOREIGN KEY lines to the column body of
            # the matched table, if any FKs were discovered for it.
            prefix, body, suffix = match.groups()
            table_name_match = re.search(r'CREATE\s+TABLE\s+`?(\w+)`?', prefix, re.I)
            table_name = table_name_match.group(1) if table_name_match else None
            extra = []
            if table_name in fk_map:
                # idx disambiguates constraint names when one table has
                # several FKs on differently-named columns.
                for idx, (col, pt, pc) in enumerate(fk_map[table_name], 1):
                    extra.append(
                        f"  CONSTRAINT fk_{table_name}_{col}_{idx} FOREIGN KEY (`{col}`) REFERENCES `{pt}`(`{pc}`)")
            if extra:
                return prefix + body.strip() + ",\n" + ",\n".join(extra) + "\n" + suffix
            else:
                # No FKs for this table: leave the statement untouched.
                return match.group(0)

        modified_sql = re.sub(create_pattern, inject_fks, original_sql, flags=re.S | re.I)

        # Write the annotated SQL.
        with open(out_path, "w", encoding="utf-8") as f:
            f.write("-- 📄 Original SQL + Inline FKs\n")
            f.write(modified_sql)

        # Mermaid output (unchanged format): one "parent ||--o{ child" edge
        # per FK, labeled with the child column name.
        with open(out_mmd, "w", encoding="utf-8") as f:
            f.write("erDiagram\n")
            for t1, c1, t2, c2 in fks:
                f.write(f"    {t2} ||--o{{ {t1} : \"{c1}\"\n")